# HG changeset patch
# User stuefe
# Date 1571232665 -7200
# Node ID 28c7e6711871f5cbf6e9c2a6772dce39f7499c37
# Parent  54c1ba464b782a06fd426a78350f99cfec5c5a0b
# Parent  64597a6fd18677f38e40fd43a43ee2fb08b359f1
Merge

diff -r 54c1ba464b78 -r 28c7e6711871 .hgtags
--- a/.hgtags	Mon Oct 07 16:48:42 2019 +0200
+++ b/.hgtags	Wed Oct 16 15:31:05 2019 +0200
@@ -590,3 +590,4 @@
 778fc2dcbdaa8981e07e929a2cacef979c72261e jdk-14+15
 d29f0181ba424a95d881aba5eabf2e393abcc70f jdk-14+16
 5c83830390baafb76a1fbe33443c57620bd45fb9 jdk-14+17
+e84d8379815ba0d3e50fb096d28c25894cb50b8c jdk-14+18

diff -r 54c1ba464b78 -r 28c7e6711871 doc/building.html
--- a/doc/building.html	Mon Oct 07 16:48:42 2019 +0200
+++ b/doc/building.html	Wed Oct 16 15:31:05 2019 +0200
@@ -281,7 +281,7 @@
 Linux
-gcc 8.2.0
+gcc 8.3.0

 macOS
@@ -293,14 +293,14 @@
 Windows
-Microsoft Visual Studio 2017 update 15.9.6
+Microsoft Visual Studio 2017 update 15.9.16

All compilers are expected to be able to compile to the C99 language standard, as some C99 features are used in the source code. Microsoft Visual Studio doesn't fully support C99, so in practice shared code is limited to using the C99 features that it does support.

gcc

The minimum accepted version of gcc is 4.8. Older versions will generate a warning by configure and are unlikely to work.

-The JDK is currently known to be able to compile with at least version 7.4 of gcc.

+The JDK is currently known to be able to compile with at least version 8.3 of gcc.

In general, any version between these two should be usable.
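A quick sanity check against this range before running configure can save a failed build. The sketch below is illustrative only: the gcc-8 paths are hypothetical, and passing CC/CXX relies on the usual autoconf convention rather than anything this patch prescribes.

    # Print the installed gcc version; it should fall within the documented
    # range (4.8 minimum, 8.3 latest known-good).
    gcc -dumpversion

    # Point configure at a specific toolchain via the standard autoconf
    # variables (paths are illustrative).
    bash configure CC=/usr/bin/gcc-8 CXX=/usr/bin/g++-8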

clang

The minimum accepted version of clang is 3.2. Older versions will not be accepted by configure.
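The same kind of up-front check applies here; the command is plain clang, nothing OpenJDK-specific:

    # configure rejects clang older than 3.2; check what is installed first.
    clang --version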

diff -r 54c1ba464b78 -r 28c7e6711871 make/CreateJmods.gmk
--- a/make/CreateJmods.gmk	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/CreateJmods.gmk	Wed Oct 16 15:31:05 2019 +0200
@@ -86,16 +86,18 @@
 # from there. These files were explicitly filtered or modified in <module>-copy
 # targets. For the rest, just pick up everything from the source legal dirs.
 LEGAL_NOTICES := \
-    $(SUPPORT_OUTPUTDIR)/modules_legal/common \
+    $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/common) \
     $(if $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
       $(wildcard $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)), \
       $(call FindModuleLegalSrcDirs, $(MODULE)) \
     )

-LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
-DEPS += $(call FindFiles, $(LEGAL_NOTICES))
+ifneq ($(strip $(LEGAL_NOTICES)), )
+  LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES))
+  DEPS += $(call FindFiles, $(LEGAL_NOTICES))

-JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
+  JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH)
+endif

 ifeq ($(filter-out jdk.incubator.%, $(MODULE)), )
   JMOD_FLAGS += --do-not-resolve-by-default

diff -r 54c1ba464b78 -r 28c7e6711871 make/RunTestsPrebuilt.gmk
--- a/make/RunTestsPrebuilt.gmk	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/RunTestsPrebuilt.gmk	Wed Oct 16 15:31:05 2019 +0200
@@ -230,7 +230,7 @@
   NUM_CORES := $(shell /usr/sbin/sysctl -n hw.ncpu)
   MEMORY_SIZE := $(shell $(EXPR) `/usr/sbin/sysctl -n hw.memsize` / 1024 / 1024)
 else ifeq ($(OPENJDK_TARGET_OS), solaris)
-  NUM_CORES := $(shell LC_MESSAGES=C /usr/sbin/psrinfo -v | $(GREP) -c on-line)
+  NUM_CORES := $(shell /usr/sbin/psrinfo -v | $(GREP) -c on-line)
   MEMORY_SIZE := $(shell \
       /usr/sbin/prtconf 2> /dev/null | $(GREP) "^Memory [Ss]ize" | $(AWK) '{print $$3}' \
   )

diff -r 54c1ba464b78 -r 28c7e6711871 make/RunTestsPrebuiltSpec.gmk
--- a/make/RunTestsPrebuiltSpec.gmk	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/RunTestsPrebuiltSpec.gmk	Wed Oct 16 15:31:05 2019 +0200
@@ -27,6 +27,9 @@
 # Fake minimalistic spec file for RunTestsPrebuilt.gmk.
 ################################################################################

+# Make sure all shell commands are executed with the C locale
+export LC_ALL := C
+
 define VerifyVariable
   ifeq ($$($1), )
     $$(info Error: Variable $1 is missing, needed by RunTestPrebuiltSpec.gmk)

diff -r 54c1ba464b78 -r 28c7e6711871 make/autoconf/basics.m4
--- a/make/autoconf/basics.m4	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/autoconf/basics.m4	Wed Oct 16 15:31:05 2019 +0200
@@ -427,7 +427,7 @@
   # Save the path variable before it gets changed
   ORIGINAL_PATH="$PATH"
   AC_SUBST(ORIGINAL_PATH)
-  DATE_WHEN_CONFIGURED=`LANG=C date`
+  DATE_WHEN_CONFIGURED=`date`
   AC_SUBST(DATE_WHEN_CONFIGURED)
   AC_MSG_NOTICE([Configuration created at $DATE_WHEN_CONFIGURED.])
 ])

diff -r 54c1ba464b78 -r 28c7e6711871 make/autoconf/build-performance.m4
--- a/make/autoconf/build-performance.m4	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/autoconf/build-performance.m4	Wed Oct 16 15:31:05 2019 +0200
@@ -35,7 +35,7 @@
     FOUND_CORES=yes
   elif test -x /usr/sbin/psrinfo; then
     # Looks like a Solaris system
-    NUM_CORES=`LC_MESSAGES=C /usr/sbin/psrinfo -v | grep -c on-line`
+    NUM_CORES=`/usr/sbin/psrinfo -v | grep -c on-line`
     FOUND_CORES=yes
   elif test -x /usr/sbin/sysctl; then
     # Looks like a MacOSX system

diff -r 54c1ba464b78 -r 28c7e6711871 make/autoconf/configure
--- a/make/autoconf/configure	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/autoconf/configure	Wed Oct 16 15:31:05 2019 +0200
@@ -43,6 +43,9 @@
 export CONFIG_SHELL=$BASH
 export _as_can_reexec=no

+# Make sure all shell commands are executed with the C locale
+export LC_ALL=C
+
 if test "x$CUSTOM_CONFIG_DIR" != x; then
   custom_hook=$CUSTOM_CONFIG_DIR/custom-hook.m4
   if test ! -e $custom_hook; then

diff -r 54c1ba464b78 -r 28c7e6711871 make/autoconf/spec.gmk.in
--- a/make/autoconf/spec.gmk.in	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/autoconf/spec.gmk.in	Wed Oct 16 15:31:05 2019 +0200
@@ -51,6 +51,9 @@
 # What make to use for main processing, after bootstrapping top-level Makefile.
 MAKE := @MAKE@

+# Make sure all shell commands are executed with the C locale
+export LC_ALL := C
+
 # The default make arguments
 MAKE_ARGS = $(MAKE_LOG_FLAGS) -r -R -I $(TOPDIR)/make/common SPEC=$(SPEC) \
     MAKE_LOG_FLAGS="$(MAKE_LOG_FLAGS)" $(MAKE_LOG_VARS)

diff -r 54c1ba464b78 -r 28c7e6711871 make/autoconf/toolchain_windows.m4
--- a/make/autoconf/toolchain_windows.m4	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/autoconf/toolchain_windows.m4	Wed Oct 16 15:31:05 2019 +0200
@@ -209,6 +209,8 @@
     eval SDK_INSTALL_DIR="\${VS_SDK_INSTALLDIR_${VS_VERSION}}"
     eval VS_ENV_ARGS="\${VS_ENV_ARGS_${VS_VERSION}}"
     eval VS_TOOLSET_SUPPORTED="\${VS_TOOLSET_SUPPORTED_${VS_VERSION}}"
+
+    VS_ENV_CMD=""

     # When using --with-tools-dir, assume it points to the correct and default
     # version of Visual Studio or that --with-toolchain-version was also set.
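The `export LC_ALL := C` / `export LC_ALL=C` lines added above replace the scattered per-command `LANG=C` and `LC_MESSAGES=C` prefixes (removed in basics.m4, build-performance.m4 and elsewhere in this patch) with one global setting. A minimal sketch of why the build pins the locale — both collation order and message text vary with it:

    # Collation: the C locale sorts by byte value (uppercase first), while
    # UTF-8 locales use dictionary order, so sorted output differs.
    printf 'b\nA\na\nB\n' | LC_ALL=C sort            # A B a b
    printf 'b\nA\na\nB\n' | LC_ALL=en_US.UTF-8 sort  # typically: a A b B

    # Message text is localized too: the build counts Solaris cores by
    # grepping psrinfo for the literal "on-line", which is only guaranteed
    # when the locale is C.
    /usr/sbin/psrinfo -v | grep -c on-line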
@@ -227,8 +229,6 @@
       fi
     fi

-    VS_ENV_CMD=""
-
     if test "x$VS_COMNTOOLS" != x; then
       TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT([${VS_VERSION}],
           [$VS_COMNTOOLS/../..], [$VS_COMNTOOLS_VAR variable])

diff -r 54c1ba464b78 -r 28c7e6711871 make/common/JavaCompilation.gmk
--- a/make/common/JavaCompilation.gmk	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/common/JavaCompilation.gmk	Wed Oct 16 15:31:05 2019 +0200
@@ -122,7 +122,7 @@
   $$($1_BIN)$$($1_MODULE_SUBDIR)$$($2_TARGET) : $2
	$$(call LogInfo, Cleaning $$(patsubst $(OUTPUTDIR)/%,%, $$@))
	$$(call MakeTargetDir)
-	export LC_ALL=C ; ( $(CAT) $$< && $(ECHO) "" ) \
+	( $(CAT) $$< && $(ECHO) "" ) \
	    | $(SED) -e 's/\([^\\]\):/\1\\:/g' -e 's/\([^\\]\)=/\1\\=/g' \
	        -e 's/\([^\\]\)!/\1\\!/g' -e 's/^[ ]*#.*/#/g' \
	    | $(SED) -f "$(TOPDIR)/make/common/support/unicode2x.sed" \

diff -r 54c1ba464b78 -r 28c7e6711871 make/conf/jib-profiles.js
--- a/make/conf/jib-profiles.js	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/conf/jib-profiles.js	Wed Oct 16 15:31:05 2019 +0200
@@ -944,11 +944,11 @@
 var getJibProfilesDependencies = function (input, common) {

     var devkit_platform_revisions = {
-        linux_x64: "gcc8.2.0-OL6.4+1.0",
+        linux_x64: "gcc8.3.0-OL6.4+1.0",
         macosx_x64: "Xcode10.1-MacOSX10.14+1.0",
         solaris_x64: "SS12u4-Solaris11u1+1.0",
         solaris_sparcv9: "SS12u6-Solaris11u3+1.0",
-        windows_x64: "VS2017-15.9.6+1.0",
+        windows_x64: "VS2017-15.9.16+1.0",
         linux_aarch64: "gcc8.2.0-Fedora27+1.0",
         linux_arm: "gcc8.2.0-Fedora27+1.0",
         linux_ppc64le: "gcc8.2.0-Fedora27+1.0",

diff -r 54c1ba464b78 -r 28c7e6711871 make/data/charsetmapping/SingleByte-X.java.template
--- a/make/data/charsetmapping/SingleByte-X.java.template	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/data/charsetmapping/SingleByte-X.java.template	Wed Oct 16 15:31:05 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ } public CharsetDecoder newDecoder() { - return new SingleByte.Decoder(this, b2c, $ASCIICOMPATIBLE$); + return new SingleByte.Decoder(this, b2c, $ASCIICOMPATIBLE$, $LATIN1DECODABLE$); } public CharsetEncoder newEncoder() { diff -r 54c1ba464b78 -r 28c7e6711871 make/data/lsrdata/language-subtag-registry.txt --- a/make/data/lsrdata/language-subtag-registry.txt Mon Oct 07 16:48:42 2019 +0200 +++ b/make/data/lsrdata/language-subtag-registry.txt Wed Oct 16 15:31:05 2019 +0200 @@ -1,4 +1,4 @@ -File-Date: 2019-04-03 +File-Date: 2019-09-16 %% Type: language Subtag: aa @@ -2096,6 +2096,8 @@ Subtag: ais Description: Nataoran Amis Added: 2009-07-29 +Deprecated: 2019-04-16 +Comments: see ami, szy %% Type: language Subtag: ait @@ -2633,6 +2635,7 @@ Type: language Subtag: ant Description: Antakarinya +Description: Antikarinya Added: 2009-07-29 %% Type: language @@ -3094,6 +3097,8 @@ Subtag: asd Description: Asas Added: 2009-07-29 +Deprecated: 2019-04-16 +Preferred-Value: snz %% Type: language Subtag: ase @@ -4135,7 +4140,7 @@ %% Type: language Subtag: bck -Description: Bunaba +Description: Bunuba Added: 2009-07-29 %% Type: language @@ -6930,7 +6935,7 @@ %% Type: language Subtag: bym -Description: Bidyara +Description: Bidjara Added: 2009-07-29 %% Type: language @@ -7564,6 +7569,11 @@ Added: 2009-07-29 %% Type: language +Subtag: cey +Description: Ekai Chin +Added: 2019-04-16 +%% +Type: language Subtag: cfa Description: Dijim-Bwilim Added: 2009-07-29 @@ -9439,6 +9449,7 @@ Type: language Subtag: dif Description: Dieri +Description: Diyari Added: 2009-07-29 %% Type: language @@ -9515,6 +9526,8 @@ Subtag: dit Description: Dirari Added: 2009-07-29 +Deprecated: 2019-04-29 +Preferred-Value: dif %% Type: language Subtag: diu @@ -9560,6 +9573,7 @@ Type: language Subtag: djd Description: Djamindjung +Description: Ngaliwurru Added: 2009-07-29 %% Type: language @@ -9603,6 +9617,7 @@ %% Type: language Subtag: djn +Description: Jawoyn Description: Djauan Added: 2009-07-29 %% @@ -10191,6 +10206,8 @@ Subtag: dud Description: Hun-Saare Added: 2009-07-29 +Deprecated: 2019-04-16 +Comments: see uth, uss %% Type: language Subtag: due @@ -10382,6 +10399,7 @@ Type: language Subtag: dyn Description: Dyangadi +Description: Dhanggatti Added: 2009-07-29 %% Type: language @@ -10396,6 +10414,7 @@ %% Type: language Subtag: dyy +Description: Djabugay Description: Dyaabugay Added: 2009-07-29 %% @@ -11672,7 +11691,7 @@ %% Type: language Subtag: gbd -Description: Karadjeri +Description: Karajarri Added: 2009-07-29 %% Type: language @@ -12056,7 +12075,7 @@ %% Type: language Subtag: gge -Description: Guragone +Description: Gurr-goni Added: 2009-07-29 %% Type: language @@ -12169,7 +12188,7 @@ %% Type: language Subtag: gia -Description: Kitja +Description: Kija Added: 2009-07-29 %% Type: language @@ -12955,7 +12974,7 @@ %% Type: language Subtag: gue -Description: Gurinji +Description: Gurindji Added: 2009-07-29 %% Type: language @@ -15292,6 +15311,7 @@ Type: language Subtag: jay Description: Yan-nhangu +Description: Nhangu Added: 2009-07-29 %% Type: language @@ -15488,6 +15508,7 @@ %% Type: language Subtag: jig +Description: Jingulu Description: Djingili Added: 2009-07-29 %% @@ -17222,6 +17243,7 @@ Type: language Subtag: kkp Description: Gugubera +Description: Koko-Bera Added: 2009-07-29 %% Type: language @@ -17266,6 +17288,7 @@ %% Type: language Subtag: kky +Description: Guugu Yimidhirr Description: Guguyimidjir Added: 2009-07-29 %% @@ -18320,6 
+18343,7 @@ Type: language Subtag: ktd Description: Kokata +Description: Kukatha Added: 2009-07-29 %% Type: language @@ -19341,6 +19365,7 @@ Subtag: lba Description: Lui Added: 2009-07-29 +Deprecated: 2019-04-16 %% Type: language Subtag: lbb @@ -19396,7 +19421,7 @@ %% Type: language Subtag: lbn -Description: Lamet +Description: Rmeet Added: 2009-07-29 %% Type: language @@ -19446,6 +19471,7 @@ %% Type: language Subtag: lby +Description: Lamalama Description: Lamu-Lamu Added: 2009-07-29 %% @@ -20162,6 +20188,8 @@ Subtag: llo Description: Khlor Added: 2009-07-29 +Deprecated: 2019-04-16 +Preferred-Value: ngt %% Type: language Subtag: llp @@ -20654,6 +20682,11 @@ Macrolanguage: luy %% Type: language +Subtag: lsn +Description: Tibetan Sign Language +Added: 2019-04-16 +%% +Type: language Subtag: lso Description: Laos Sign Language Added: 2009-07-29 @@ -20680,6 +20713,11 @@ Added: 2009-07-29 %% Type: language +Subtag: lsv +Description: Sivia Sign Language +Added: 2019-04-16 +%% +Type: language Subtag: lsy Description: Mauritian Sign Language Added: 2010-03-11 @@ -20848,6 +20886,11 @@ Added: 2009-07-29 %% Type: language +Subtag: lvi +Description: Lavi +Added: 2019-04-16 +%% +Type: language Subtag: lvk Description: Lavukaleve Added: 2009-07-29 @@ -21454,7 +21497,7 @@ %% Type: language Subtag: mec -Description: Mara +Description: Marra Added: 2009-07-29 %% Type: language @@ -21523,7 +21566,7 @@ %% Type: language Subtag: mep -Description: Miriwung +Description: Miriwoong Added: 2009-07-29 %% Type: language @@ -21660,7 +21703,7 @@ %% Type: language Subtag: mfr -Description: Marithiel +Description: Marrithiyel Added: 2009-07-29 %% Type: language @@ -22853,12 +22896,13 @@ %% Type: language Subtag: mpb +Description: Malak Malak Description: Mullukmulluk Added: 2009-07-29 %% Type: language Subtag: mpc -Description: Mangarayi +Description: Mangarrayi Added: 2009-07-29 %% Type: language @@ -22889,6 +22933,7 @@ Type: language Subtag: mpj Description: Martu Wangka +Description: Wangkajunga Added: 2009-07-29 %% Type: language @@ -24015,6 +24060,8 @@ Subtag: myd Description: Maramba Added: 2009-07-29 +Deprecated: 2019-04-16 +Preferred-Value: aog %% Type: language Subtag: mye @@ -24040,6 +24087,7 @@ Subtag: myi Description: Mina (India) Added: 2009-07-29 +Deprecated: 2019-04-16 %% Type: language Subtag: myj @@ -24375,7 +24423,7 @@ %% Type: language Subtag: nay -Description: Narrinyeri +Description: Ngarrindjeri Added: 2009-07-29 %% Type: language @@ -24432,7 +24480,7 @@ %% Type: language Subtag: nbj -Description: Ngarinman +Description: Ngarinyman Added: 2009-07-29 %% Type: language @@ -24467,7 +24515,7 @@ %% Type: language Subtag: nbr -Description: Numana-Nunku-Gbantu-Numbu +Description: Numana Added: 2009-07-29 %% Type: language @@ -24559,7 +24607,7 @@ %% Type: language Subtag: nck -Description: Nakara +Description: Na-kara Added: 2009-07-29 %% Type: language @@ -24931,7 +24979,7 @@ %% Type: language Subtag: ngh -Description: Nǀu +Description: Nǁng Added: 2009-07-29 %% Type: language @@ -25176,7 +25224,7 @@ %% Type: language Subtag: nig -Description: Ngalakan +Description: Ngalakgan Added: 2009-07-29 %% Type: language @@ -25798,6 +25846,8 @@ Subtag: nns Description: Ningye Added: 2009-07-29 +Deprecated: 2019-04-16 +Preferred-Value: nbr %% Type: language Subtag: nnt @@ -26658,7 +26708,7 @@ %% Type: language Subtag: nyh -Description: Nyigina +Description: Nyikina Added: 2009-07-29 %% Type: language @@ -26713,7 +26763,7 @@ %% Type: language Subtag: nys -Description: Nyunga +Description: Nyungar Added: 2009-07-29 
%% Type: language @@ -28707,6 +28757,11 @@ Added: 2009-07-29 %% Type: language +Subtag: pnd +Description: Mpinda +Added: 2019-04-16 +%% +Type: language Subtag: pne Description: Western Penan Added: 2009-07-29 @@ -28794,6 +28849,7 @@ %% Type: language Subtag: pnw +Description: Banyjima Description: Panytyima Added: 2009-07-29 %% @@ -29251,7 +29307,8 @@ %% Type: language Subtag: pti -Description: Pintiini +Description: Pindiini +Description: Wangkatha Added: 2009-07-29 %% Type: language @@ -30133,6 +30190,7 @@ %% Type: language Subtag: ril +Description: Riang Lang Description: Riang (Myanmar) Added: 2009-07-29 %% @@ -30153,7 +30211,7 @@ %% Type: language Subtag: rit -Description: Ritarungo +Description: Ritharrngu Added: 2009-07-29 %% Type: language @@ -30219,7 +30277,7 @@ %% Type: language Subtag: rmb -Description: Rembarunga +Description: Rembarrnga Added: 2009-07-29 %% Type: language @@ -30641,6 +30699,7 @@ Type: language Subtag: rxw Description: Karuwali +Description: Garuwali Added: 2013-09-10 %% Type: language @@ -32206,7 +32265,7 @@ %% Type: language Subtag: snz -Description: Sinsauru +Description: Kou Added: 2009-07-29 %% Type: language @@ -32883,6 +32942,7 @@ Subtag: suj Description: Shubi Added: 2009-07-29 +Comments: see also xsj %% Type: language Subtag: suk @@ -33312,6 +33372,11 @@ Added: 2009-07-29 %% Type: language +Subtag: szy +Description: Sakizaya +Added: 2019-04-16 +%% +Type: language Subtag: taa Description: Lower Tanana Added: 2009-07-29 @@ -33465,6 +33530,7 @@ %% Type: language Subtag: tbh +Description: Dharawal Description: Thurawal Added: 2009-07-29 %% @@ -33644,6 +33710,7 @@ Type: language Subtag: tcs Description: Torres Strait Creole +Description: Yumplatok Added: 2009-07-29 %% Type: language @@ -34067,6 +34134,7 @@ %% Type: language Subtag: thd +Description: Kuuk Thaayorre Description: Thayore Added: 2009-07-29 %% @@ -34310,6 +34378,11 @@ Added: 2009-07-29 %% Type: language +Subtag: tjj +Description: Tjungundji +Added: 2019-04-16 +%% +Type: language Subtag: tjl Description: Tai Laing Added: 2012-08-12 @@ -34330,6 +34403,11 @@ Added: 2009-07-29 %% Type: language +Subtag: tjp +Description: Tjupany +Added: 2019-04-16 +%% +Type: language Subtag: tjs Description: Southern Tujia Added: 2009-07-29 @@ -35679,6 +35757,11 @@ Added: 2009-07-29 %% Type: language +Subtag: tvx +Description: Taivoan +Added: 2019-04-16 +%% +Type: language Subtag: tvy Description: Timor Pidgin Added: 2009-07-29 @@ -36230,7 +36313,7 @@ %% Type: language Subtag: ulk -Description: Meriam +Description: Meriam Mir Added: 2009-07-29 %% Type: language @@ -36280,6 +36363,7 @@ %% Type: language Subtag: umg +Description: Morrobalama Description: Umbuygamu Added: 2009-07-29 %% @@ -36550,6 +36634,11 @@ Added: 2009-07-29 %% Type: language +Subtag: uss +Description: us-Saare +Added: 2019-04-16 +%% +Type: language Subtag: usu Description: Uya Added: 2009-07-29 @@ -36565,6 +36654,11 @@ Added: 2009-07-29 %% Type: language +Subtag: uth +Description: ut-Hun +Added: 2019-04-16 +%% +Type: language Subtag: utp Description: Amba (Solomon Islands) Added: 2009-07-29 @@ -37178,7 +37272,7 @@ %% Type: language Subtag: waq -Description: Wageman +Description: Wagiman Added: 2009-07-29 %% Type: language @@ -37301,7 +37395,7 @@ %% Type: language Subtag: wbt -Description: Wanman +Description: Warnman Added: 2009-07-29 %% Type: language @@ -37448,6 +37542,7 @@ %% Type: language Subtag: wgg +Description: Wangkangurru Description: Wangganguru Added: 2009-07-29 %% @@ -37521,7 +37616,7 @@ %% Type: language Subtag: wig -Description: 
Wik-Ngathana +Description: Wik Ngathan Added: 2009-07-29 %% Type: language @@ -37625,6 +37720,11 @@ Added: 2009-07-29 %% Type: language +Subtag: wkr +Description: Keerray-Woorroong +Added: 2019-04-16 +%% +Type: language Subtag: wku Description: Kunduvadi Added: 2009-07-29 @@ -37857,10 +37957,12 @@ Type: language Subtag: wny Description: Wanyi +Description: Waanyi Added: 2012-08-12 %% Type: language Subtag: woa +Description: Kuwema Description: Tyaraity Added: 2009-07-29 %% @@ -37951,6 +38053,7 @@ %% Type: language Subtag: wrb +Description: Waluwarra Description: Warluwara Added: 2009-07-29 %% @@ -37962,11 +38065,12 @@ Type: language Subtag: wrg Description: Warungu +Description: Gudjal Added: 2009-07-29 %% Type: language Subtag: wrh -Description: Wiradhuri +Description: Wiradjuri Added: 2009-07-29 %% Type: language @@ -38439,6 +38543,7 @@ %% Type: language Subtag: xby +Description: Batjala Description: Batyala Added: 2013-09-10 %% @@ -38998,7 +39103,7 @@ %% Type: language Subtag: xmh -Description: Kuku-Muminh +Description: Kugu-Muminh Added: 2009-07-29 %% Type: language @@ -39423,8 +39528,7 @@ Subtag: xsj Description: Subi Added: 2009-07-29 -Deprecated: 2015-02-12 -Preferred-Value: suj +Comments: see also suj %% Type: language Subtag: xsl @@ -40258,6 +40362,7 @@ %% Type: language Subtag: yin +Description: Riang Lai Description: Yinchia Added: 2009-07-29 %% @@ -41562,12 +41667,13 @@ %% Type: language Subtag: zml -Description: Madngele +Description: Matngala Added: 2009-07-29 %% Type: language Subtag: zmm Description: Marimanindji +Description: Marramaninyshi Added: 2009-07-29 %% Type: language @@ -43019,6 +43125,13 @@ Prefix: sgn %% Type: extlang +Subtag: lsn +Description: Tibetan Sign Language +Added: 2019-04-16 +Preferred-Value: lsn +Prefix: sgn +%% +Type: extlang Subtag: lso Description: Laos Sign Language Added: 2009-07-29 @@ -43041,6 +43154,13 @@ Prefix: sgn %% Type: extlang +Subtag: lsv +Description: Sivia Sign Language +Added: 2019-04-16 +Preferred-Value: lsv +Prefix: sgn +%% +Type: extlang Subtag: lsy Description: Mauritian Sign Language Added: 2010-03-11 @@ -43966,6 +44086,11 @@ Added: 2005-10-16 %% Type: script +Subtag: Chrs +Description: Chorasmian +Added: 2019-09-11 +%% +Type: script Subtag: Cirt Description: Cirth Added: 2005-10-16 @@ -44002,6 +44127,11 @@ Added: 2005-10-16 %% Type: script +Subtag: Diak +Description: Dives Akuru +Added: 2019-09-11 +%% +Type: script Subtag: Dogr Description: Dogra Added: 2017-01-13 @@ -44839,6 +44969,11 @@ Added: 2005-10-16 %% Type: script +Subtag: Yezi +Description: Yezidi +Added: 2019-09-11 +%% +Type: script Subtag: Yiii Description: Yi Added: 2005-10-16 @@ -45683,7 +45818,7 @@ %% Type: region Subtag: MK -Description: The Former Yugoslav Republic of Macedonia +Description: North Macedonia Added: 2005-10-16 %% Type: region diff -r 54c1ba464b78 -r 28c7e6711871 make/devkit/Tools.gmk --- a/make/devkit/Tools.gmk Mon Oct 07 16:48:42 2019 +0200 +++ b/make/devkit/Tools.gmk Wed Oct 16 15:31:05 2019 +0200 @@ -79,20 +79,19 @@ # Define external dependencies # Latest that could be made to work. 
-GCC_VER := 8.2.0
-ifeq ($(GCC_VER), 8.2.0)
-  gcc_ver := gcc-8.2.0
-  binutils_ver := binutils-2.30
-  ccache_ver := ccache-3.5.1a
-  CCACHE_DIRNAME := ccache-3.5.1
+GCC_VER := 8.3.0
+ifeq ($(GCC_VER), 8.3.0)
+  gcc_ver := gcc-8.3.0
+  binutils_ver := binutils-2.32
+  ccache_ver := 3.7.3
   mpfr_ver := mpfr-3.1.5
   gmp_ver := gmp-6.1.2
   mpc_ver := mpc-1.0.3
-  gdb_ver := gdb-8.2.1
+  gdb_ver := gdb-8.3
 else ifeq ($(GCC_VER), 7.3.0)
   gcc_ver := gcc-7.3.0
   binutils_ver := binutils-2.30
-  ccache_ver := ccache-3.3.6
+  ccache_ver := 3.3.6
   mpfr_ver := mpfr-3.1.5
   gmp_ver := gmp-6.1.2
   mpc_ver := mpc-1.0.3
@@ -100,7 +99,7 @@
 else ifeq ($(GCC_VER), 4.9.2)
   gcc_ver := gcc-4.9.2
   binutils_ver := binutils-2.25
-  ccache_ver := ccache-3.2.1
+  ccache_ver := 3.2.1
   mpfr_ver := mpfr-3.0.1
   gmp_ver := gmp-4.3.2
   mpc_ver := mpc-1.0.1
@@ -111,7 +110,7 @@

 GCC := http://ftp.gnu.org/pub/gnu/gcc/$(gcc_ver)/$(gcc_ver).tar.xz
 BINUTILS := http://ftp.gnu.org/pub/gnu/binutils/$(binutils_ver).tar.xz
-CCACHE := https://samba.org/ftp/ccache/$(ccache_ver).tar.xz
+CCACHE := https://github.com/ccache/ccache/releases/download/v$(ccache_ver)/ccache-$(ccache_ver).tar.xz
 MPFR := https://www.mpfr.org/${mpfr_ver}/${mpfr_ver}.tar.bz2
 GMP := http://ftp.gnu.org/pub/gnu/gmp/${gmp_ver}.tar.bz2
 MPC := http://ftp.gnu.org/pub/gnu/mpc/${mpc_ver}.tar.gz

diff -r 54c1ba464b78 -r 28c7e6711871 make/devkit/createWindowsDevkit2017.sh
--- a/make/devkit/createWindowsDevkit2017.sh	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/devkit/createWindowsDevkit2017.sh	Wed Oct 16 15:31:05 2019 +0200
@@ -32,10 +32,7 @@
 VS_VERSION_NUM_NODOT="150"
 VS_DLL_VERSION="140"
 SDK_VERSION="10"
-SDK_FULL_VERSION="10.0.16299.0"
 MSVC_DIR="Microsoft.VC141.CRT"
-MSVC_FULL_VERSION="14.12.25827"
-REDIST_FULL_VERSION="14.12.25810"

 SCRIPT_DIR="$(cd "$(dirname $0)" > /dev/null && pwd)"
 BUILD_DIR="${SCRIPT_DIR}/../../build/devkit"

diff -r 54c1ba464b78 -r 28c7e6711871 make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk	Wed Oct 16 15:31:05 2019 +0200
@@ -73,7 +73,7 @@
	($(CD) $(GENSRC_DIR)/META-INF/providers && \
	    p=""; \
	    impl=""; \
-	    for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
+	    for i in $$($(NAWK) '$$0=FILENAME" "$$0' * | $(SORT) -k 2 | $(SED) 's/ .*//'); do \
	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
	      if test x$$p != x$$c; then \
	        if test x$$p != x; then \

diff -r 54c1ba464b78 -r 28c7e6711871 make/jdk/src/classes/build/tools/charsetmapping/SBCS.java
--- a/make/jdk/src/classes/build/tools/charsetmapping/SBCS.java	Mon Oct 07 16:48:42 2019 +0200
+++ b/make/jdk/src/classes/build/tools/charsetmapping/SBCS.java	Wed Oct 16 15:31:05 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ String hisName = cs.hisName; String pkgName = cs.pkgName; boolean isASCII = cs.isASCII; + boolean isLatin1Decodable = true; StringBuilder b2cSB = new StringBuilder(); StringBuilder b2cNRSB = new StringBuilder(); @@ -69,6 +70,9 @@ c2bOff += 0x100; c2bIndex[e.cp>>8] = 1; } + if (e.cp > 0xFF) { + isLatin1Decodable = false; + } } Formatter fm = new Formatter(b2cSB); @@ -178,6 +182,9 @@ if (line.indexOf("$ASCIICOMPATIBLE$") != -1) { line = line.replace("$ASCIICOMPATIBLE$", isASCII ? "true" : "false"); } + if (line.indexOf("$LATIN1DECODABLE$") != -1) { + line = line.replace("$LATIN1DECODABLE$", isLatin1Decodable ? "true" : "false"); + } if (line.indexOf("$B2CTABLE$") != -1) { line = line.replace("$B2CTABLE$", b2c); } diff -r 54c1ba464b78 -r 28c7e6711871 make/lib/CoreLibraries.gmk --- a/make/lib/CoreLibraries.gmk Mon Oct 07 16:48:42 2019 +0200 +++ b/make/lib/CoreLibraries.gmk Wed Oct 16 15:31:05 2019 +0200 @@ -23,8 +23,6 @@ # questions. # -WIN_VERIFY_LIB := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libverify/verify.lib - # Hook to include the corresponding custom file, if present. $(eval $(call IncludeCustomExtension, lib/CoreLibraries.gmk)) @@ -110,14 +108,14 @@ LDFLAGS_macosx := -L$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/, \ LDFLAGS_windows := -delayload:shell32.dll, \ LIBS := $(BUILD_LIBFDLIBM_TARGET), \ - LIBS_unix := -ljvm -lverify, \ + LIBS_unix := -ljvm, \ LIBS_linux := $(LIBDL), \ LIBS_solaris := -lsocket -lnsl -lscf $(LIBDL), \ LIBS_aix := $(LIBDL) $(LIBM),\ LIBS_macosx := -framework CoreFoundation \ -framework Foundation \ -framework SystemConfiguration, \ - LIBS_windows := jvm.lib $(WIN_VERIFY_LIB) \ + LIBS_windows := jvm.lib \ shell32.lib delayimp.lib \ advapi32.lib version.lib, \ )) diff -r 54c1ba464b78 -r 28c7e6711871 make/scripts/compare.sh --- a/make/scripts/compare.sh Mon Oct 07 16:48:42 2019 +0200 +++ b/make/scripts/compare.sh Wed Oct 16 15:31:05 2019 +0200 @@ -34,6 +34,9 @@ exit 1 fi +# Make sure all shell commands are executed with the C locale +export LC_ALL=C + if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then FULLDUMP_CMD="$OTOOL -v -V -h -X -d" LDD_CMD="$OTOOL -L" @@ -110,7 +113,7 @@ " fi elif [ "$OPENJDK_TARGET_OS" = "macosx" ]; then - DIS_DIFF_FILTER="LANG=C $SED \ + DIS_DIFF_FILTER="$SED \ -e 's/0x[0-9a-f]\{3,16\}//g' -e 's/^[0-9a-f]\{12,20\}//' \ -e 's/-20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]-[0-2][0-9]\{5\}//g' \ -e 's/), built on .*/), /' \ @@ -134,7 +137,7 @@ if [[ "$THIS_FILE" = *"META-INF/MANIFEST.MF" ]]; then # Filter out date string, ant version and java version differences. - TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \ + TMP=$($DIFF $OTHER_FILE $THIS_FILE | \ $GREP '^[<>]' | \ $SED -e '/[<>] Ant-Version: Apache Ant .*/d' \ -e '/[<>] Created-By: .* (Oracle [Corpatin)]*/d' \ @@ -142,7 +145,7 @@ -e '/[<>].*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d') fi if test "x$SUFFIX" = "xjava"; then - TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \ + TMP=$($DIFF $OTHER_FILE $THIS_FILE | \ $GREP '^[<>]' | \ $SED -e '/[<>] \* from.*\.idl/d' \ -e '/[<>] .*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d' \ @@ -197,7 +200,7 @@ fi if test "x$SUFFIX" = "xproperties"; then # Filter out date string differences. 
- TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \ + TMP=$($DIFF $OTHER_FILE $THIS_FILE | \ $GREP '^[<>]' | \ $SED -e '/[<>].*[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}.*/d') fi @@ -207,7 +210,7 @@ -e 's///g'" $CAT $THIS_FILE | eval "$HTML_FILTER" > $THIS_FILE.filtered $CAT $OTHER_FILE | eval "$HTML_FILTER" > $OTHER_FILE.filtered - TMP=$(LC_ALL=C $DIFF $OTHER_FILE.filtered $THIS_FILE.filtered | \ + TMP=$($DIFF $OTHER_FILE.filtered $THIS_FILE.filtered | \ $GREP '^[<>]' | \ $SED -e '/[<>] /d' \ -e '/[<>] /d' ) @@ -554,11 +557,11 @@ CONTENTS_DIFF_FILE=$WORK_DIR/$ZIP_FILE.diff # On solaris, there is no -q option. if [ "$OPENJDK_TARGET_OS" = "solaris" ]; then - LC_ALL=C $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \ + $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \ | $GREP -v -e "^<" -e "^>" -e "^Common subdirectories:" \ > $CONTENTS_DIFF_FILE else - LC_ALL=C $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE + $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE fi ONLY_OTHER=$($GREP "^Only in $OTHER_UNZIPDIR" $CONTENTS_DIFF_FILE) @@ -605,11 +608,11 @@ if [ -n "$SHOW_DIFFS" ]; then for i in $(cat $WORK_DIR/$ZIP_FILE.difflist) ; do if [ -f "${OTHER_UNZIPDIR}/$i.javap" ]; then - LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap + $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap elif [ -f "${OTHER_UNZIPDIR}/$i.cleaned" ]; then - LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i + $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i else - LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i + $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i fi done fi @@ -642,7 +645,7 @@ $JMOD list $THIS_JMOD | sort > $THIS_JMOD_LIST $JMOD list $OTHER_JMOD | sort > $OTHER_JMOD_LIST JMOD_LIST_DIFF_FILE=$WORK_DIR/$JMOD_FILE.list.diff - LC_ALL=C $DIFF $THIS_JMOD_LIST $OTHER_JMOD_LIST > $JMOD_LIST_DIFF_FILE + $DIFF $THIS_JMOD_LIST $OTHER_JMOD_LIST > $JMOD_LIST_DIFF_FILE ONLY_THIS=$($GREP "^<" $JMOD_LIST_DIFF_FILE) ONLY_OTHER=$($GREP "^>" $JMOD_LIST_DIFF_FILE) @@ -924,7 +927,7 @@ > $WORK_FILE_BASE.symbols.this fi - LC_ALL=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff + $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff if [ -s $WORK_FILE_BASE.symbols.diff ]; then SYM_MSG=" diff " if [[ "$ACCEPTED_SYM_DIFF" != *"$BIN_FILE"* ]]; then @@ -964,9 +967,9 @@ | $UNIQ > $WORK_FILE_BASE.deps.this.uniq) (cd $FILE_WORK_DIR && $RM -f $NAME) - LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this \ + $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this \ > $WORK_FILE_BASE.deps.diff - LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq \ + $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq \ > $WORK_FILE_BASE.deps.diff.uniq if [ -s $WORK_FILE_BASE.deps.diff ]; then @@ -1016,7 +1019,7 @@ > $WORK_FILE_BASE.fulldump.this 2>&1 & wait - LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \ + $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \ > $WORK_FILE_BASE.fulldump.diff if [ -s $WORK_FILE_BASE.fulldump.diff ]; then @@ -1063,7 +1066,7 @@ | eval "$this_DIS_DIFF_FILTER" > $WORK_FILE_BASE.dis.this 2>&1 & wait - LC_ALL=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff + $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff if [ -s $WORK_FILE_BASE.dis.diff ]; then DIS_DIFF_SIZE=$(ls -n $WORK_FILE_BASE.dis.diff | awk 
'{print $5}') diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/aarch64.ad --- a/src/hotspot/cpu/aarch64/aarch64.ad Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/aarch64.ad Wed Oct 16 15:31:05 2019 +0200 @@ -2513,17 +2513,8 @@ __ INSN(REG, as_Register(BASE)); \ } -typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr); -typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr); -typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, - MacroAssembler::SIMD_RegVariant T, const Address &adr); - - // Used for all non-volatile memory accesses. The use of - // $mem->opcode() to discover whether this pattern uses sign-extended - // offsets is something of a kludge. - static void loadStore(MacroAssembler masm, mem_insn insn, - Register reg, int opcode, - Register base, int index, int size, int disp) + +static Address mem2address(int opcode, Register base, int index, int size, int disp) { Address::extend scale; @@ -2542,16 +2533,34 @@ } if (index == -1) { - (masm.*insn)(reg, Address(base, disp)); + return Address(base, disp); } else { assert(disp == 0, "unsupported address mode: disp = %d", disp); - (masm.*insn)(reg, Address(base, as_Register(index), scale)); + return Address(base, as_Register(index), scale); } } + +typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr); +typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr); +typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr); +typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, + MacroAssembler::SIMD_RegVariant T, const Address &adr); + + // Used for all non-volatile memory accesses. The use of + // $mem->opcode() to discover whether this pattern uses sign-extended + // offsets is something of a kludge. 
+ static void loadStore(MacroAssembler masm, mem_insn insn, + Register reg, int opcode, + Register base, int index, int size, int disp) + { + Address addr = mem2address(opcode, base, index, size, disp); + (masm.*insn)(reg, addr); + } + static void loadStore(MacroAssembler masm, mem_float_insn insn, - FloatRegister reg, int opcode, - Register base, int index, int size, int disp) + FloatRegister reg, int opcode, + Register base, int index, int size, int disp) { Address::extend scale; @@ -2573,8 +2582,8 @@ } static void loadStore(MacroAssembler masm, mem_vector_insn insn, - FloatRegister reg, MacroAssembler::SIMD_RegVariant T, - int opcode, Register base, int index, int size, int disp) + FloatRegister reg, MacroAssembler::SIMD_RegVariant T, + int opcode, Register base, int index, int size, int disp) { if (index == -1) { (masm.*insn)(reg, T, Address(base, disp)); @@ -3791,7 +3800,7 @@ static const int hi[Op_RegL + 1] = { // enum name 0, // Op_Node 0, // Op_Set - OptoReg::Bad, // Op_RegN + OptoReg::Bad, // Op_RegN OptoReg::Bad, // Op_RegI R0_H_num, // Op_RegP OptoReg::Bad, // Op_RegF @@ -6923,7 +6932,7 @@ instruct loadP(iRegPNoSp dst, memory mem) %{ match(Set dst (LoadP mem)); - predicate(!needs_acquiring_load(n)); + predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0)); ins_cost(4 * INSN_COST); format %{ "ldr $dst, $mem\t# ptr" %} @@ -7616,6 +7625,7 @@ instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem) %{ match(Set dst (LoadP mem)); + predicate(n->as_Load()->barrier_data() == 0); ins_cost(VOLATILE_REF_COST); format %{ "ldar $dst, $mem\t# ptr" %} @@ -8552,6 +8562,7 @@ instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ match(Set res (CompareAndSwapP mem (Binary oldval newval))); + predicate(n->as_LoadStore()->barrier_data() == 0); ins_cost(2 * VOLATILE_REF_COST); effect(KILL cr); @@ -8665,7 +8676,7 @@ instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - predicate(needs_acquiring_load_exclusive(n)); + predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0)); match(Set res (CompareAndSwapP mem (Binary oldval newval))); ins_cost(VOLATILE_REF_COST); @@ -8796,6 +8807,7 @@ %} instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + predicate(n->as_LoadStore()->barrier_data() == 0); match(Set res (CompareAndExchangeP mem (Binary oldval newval))); ins_cost(2 * VOLATILE_REF_COST); effect(TEMP_DEF res, KILL cr); @@ -8895,7 +8907,7 @@ %} instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - predicate(needs_acquiring_load_exclusive(n)); + predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0)); match(Set res (CompareAndExchangeP mem (Binary oldval newval))); ins_cost(VOLATILE_REF_COST); effect(TEMP_DEF res, KILL cr); @@ -8996,6 +9008,7 @@ %} instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + predicate(n->as_LoadStore()->barrier_data() == 0); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); ins_cost(2 * VOLATILE_REF_COST); effect(KILL cr); @@ -9103,8 +9116,8 @@ %} instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ - predicate(needs_acquiring_load_exclusive(n)); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(needs_acquiring_load_exclusive(n) && 
(n->as_LoadStore()->barrier_data() == 0)); ins_cost(VOLATILE_REF_COST); effect(KILL cr); format %{ @@ -9154,6 +9167,7 @@ %} instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{ + predicate(n->as_LoadStore()->barrier_data() == 0); match(Set prev (GetAndSetP mem newv)); ins_cost(2 * VOLATILE_REF_COST); format %{ "atomic_xchg $prev, $newv, [$mem]" %} @@ -9197,7 +9211,7 @@ %} instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{ - predicate(needs_acquiring_load_exclusive(n)); + predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0)); match(Set prev (GetAndSetP mem newv)); ins_cost(VOLATILE_REF_COST); format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp --- a/src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/abstractInterpreter_aarch64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "interpreter/interpreter.hpp" #include "oops/constMethod.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.hpp" #include "runtime/frame.inline.hpp" #include "utilities/align.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp --- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -162,16 +162,12 @@ // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeInstruction::instruction_size); -#ifndef PRODUCT - NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address()); - // read the value once - volatile intptr_t data = method_holder->data(); - assert(data == 0 || data == (intptr_t)callee(), - "a) MT-unsafe modification of inline cache"); - assert(data == 0 || jump->jump_destination() == entry, - "b) MT-unsafe modification of inline cache"); +#ifdef ASSERT + NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address()); + verify_mt_safe(callee, entry, method_holder, jump); #endif + // Update stub. 
method_holder->set_data((intptr_t)callee()); NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry); @@ -189,6 +185,10 @@ NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeInstruction::instruction_size); method_holder->set_data(0); + if (!static_stub->is_aot()) { + NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); + jump->set_jump_destination((address)-1); + } } //----------------------------------------------------------------------------- diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -24,22 +24,23 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "code/codeBlob.hpp" +#include "code/vmreg.inline.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" +#include "gc/z/zThreadLocalData.hpp" #include "memory/resourceArea.hpp" +#include "runtime/sharedRuntime.hpp" +#include "utilities/macros.hpp" #ifdef COMPILER1 #include "c1/c1_LIRAssembler.hpp" #include "c1/c1_MacroAssembler.hpp" #include "gc/z/c1/zBarrierSetC1.hpp" #endif // COMPILER1 - -#include "gc/z/zThreadLocalData.hpp" - -ZBarrierSetAssembler::ZBarrierSetAssembler() : - _load_barrier_slow_stub(), - _load_barrier_weak_slow_stub() {} +#ifdef COMPILER2 +#include "gc/z/c2/zBarrierSetC2.hpp" +#endif // COMPILER2 #ifdef PRODUCT #define BLOCK_COMMENT(str) /* nothing */ @@ -66,7 +67,7 @@ assert_different_registers(rscratch1, rscratch2, src.base()); assert_different_registers(rscratch1, rscratch2, dst); - RegSet savedRegs = RegSet::range(r0,r28) - RegSet::of(dst, rscratch1, rscratch2); + RegSet savedRegs = RegSet::range(r0, r28) - RegSet::of(dst, rscratch1, rscratch2); Label done; @@ -206,7 +207,8 @@ // The Address offset is too large to direct load - -784. Our range is +127, -128. __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) - - in_bytes(JavaThread::jni_environment_offset()))); + in_bytes(JavaThread::jni_environment_offset()))); + // Load address bad mask __ add(tmp, jni_env, tmp); __ ldr(tmp, Address(tmp)); @@ -294,12 +296,12 @@ __ prologue("zgc_load_barrier stub", false); // We don't use push/pop_clobbered_registers() - we need to pull out the result from r0. 
- for (int i = 0; i < 32; i +=2) { - __ stpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ pre(sp,-16))); + for (int i = 0; i < 32; i += 2) { + __ stpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ pre(sp,-16))); } - RegSet saveRegs = RegSet::range(r0,r28) - RegSet::of(r0); - __ push(saveRegs, sp); + const RegSet save_regs = RegSet::range(r1, r28); + __ push(save_regs, sp); // Setup arguments __ load_parameter(0, c_rarg0); @@ -307,98 +309,161 @@ __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), 2); - __ pop(saveRegs, sp); + __ pop(save_regs, sp); - for (int i = 30; i >0; i -=2) { - __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1), Address(__ post(sp, 16))); - } + for (int i = 30; i >= 0; i -= 2) { + __ ldpd(as_FloatRegister(i), as_FloatRegister(i + 1), Address(__ post(sp, 16))); + } __ epilogue(); } #endif // COMPILER1 +#ifdef COMPILER2 + +OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { + if (!OptoReg::is_reg(opto_reg)) { + return OptoReg::Bad; + } + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_FloatRegister()) { + return opto_reg & ~1; + } + + return opto_reg; +} + #undef __ -#define __ cgen->assembler()-> +#define __ _masm-> + +class ZSaveLiveRegisters { +private: + MacroAssembler* const _masm; + RegSet _gp_regs; + RegSet _fp_regs; + +public: + void initialize(ZLoadBarrierStubC2* stub) { + // Create mask of live registers + RegMask live = stub->live(); -// Generates a register specific stub for calling -// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or -// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(). -// -// The raddr register serves as both input and output for this stub. When the stub is -// called the raddr register contains the object field address (oop*) where the bad oop -// was loaded from, which caused the slow path to be taken. On return from the stub the -// raddr register contains the good/healed oop returned from -// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or -// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(). -static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) { - // Don't generate stub for invalid registers - if (raddr == zr || raddr == r29 || raddr == r30) { - return NULL; + // Record registers that needs to be saved/restored + while (live.is_NotEmpty()) { + const OptoReg::Name opto_reg = live.find_first_elem(); + live.Remove(opto_reg); + if (OptoReg::is_reg(opto_reg)) { + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_Register()) { + _gp_regs += RegSet::of(vm_reg->as_Register()); + } else if (vm_reg->is_FloatRegister()) { + _fp_regs += RegSet::of((Register)vm_reg->as_FloatRegister()); + } else { + fatal("Unknown register type"); + } + } + } + + // Remove C-ABI SOE registers, scratch regs and _ref register that will be updated + _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9, stub->ref()); + } + + ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + _masm(masm), + _gp_regs(), + _fp_regs() { + + // Figure out what registers to save/restore + initialize(stub); + + // Save registers + __ push(_gp_regs, sp); + __ push_fp(_fp_regs, sp); } - // Create stub name - char name[64]; - const bool weak = (decorators & ON_WEAK_OOP_REF) != 0; - os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? 
"_weak" : "", raddr->name()); + ~ZSaveLiveRegisters() { + // Restore registers + __ pop_fp(_fp_regs, sp); + __ pop(_gp_regs, sp); + } +}; + +#undef __ +#define __ _masm-> - __ align(CodeEntryAlignment); - StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode)); - address start = __ pc(); +class ZSetupArguments { +private: + MacroAssembler* const _masm; + const Register _ref; + const Address _ref_addr; + +public: + ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + _masm(masm), + _ref(stub->ref()), + _ref_addr(stub->ref_addr()) { - // Save live registers - RegSet savedRegs = RegSet::range(r0,r18) - RegSet::of(raddr); - - __ enter(); - __ push(savedRegs, sp); - - // Setup arguments - if (raddr != c_rarg1) { - __ mov(c_rarg1, raddr); + // Setup arguments + if (_ref_addr.base() == noreg) { + // No self healing + if (_ref != c_rarg0) { + __ mov(c_rarg0, _ref); + } + __ mov(c_rarg1, 0); + } else { + // Self healing + if (_ref == c_rarg0) { + // _ref is already at correct place + __ lea(c_rarg1, _ref_addr); + } else if (_ref != c_rarg1) { + // _ref is in wrong place, but not in c_rarg1, so fix it first + __ lea(c_rarg1, _ref_addr); + __ mov(c_rarg0, _ref); + } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) { + assert(_ref == c_rarg1, "Mov ref first, vacating c_rarg0"); + __ mov(c_rarg0, _ref); + __ lea(c_rarg1, _ref_addr); + } else { + assert(_ref == c_rarg1, "Need to vacate c_rarg1 and _ref_addr is using c_rarg0"); + if (_ref_addr.base() == c_rarg0 || _ref_addr.index() == c_rarg0) { + __ mov(rscratch2, c_rarg1); + __ lea(c_rarg1, _ref_addr); + __ mov(c_rarg0, rscratch2); + } else { + ShouldNotReachHere(); + } + } + } } - __ ldr(c_rarg0, Address(raddr)); + ~ZSetupArguments() { + // Transfer result + if (_ref != r0) { + __ mov(_ref, r0); + } + } +}; + +#undef __ +#define __ masm-> - // Call barrier function - __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1); +void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const { + BLOCK_COMMENT("ZLoadBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); - // Move result returned in r0 to raddr, if needed - if (raddr != r0) { - __ mov(raddr, r0); + { + ZSaveLiveRegisters save_live_registers(masm, stub); + ZSetupArguments setup_arguments(masm, stub); + __ mov(rscratch1, stub->slow_path()); + __ blr(rscratch1); } - __ pop(savedRegs, sp); - __ leave(); - __ ret(lr); - - return start; + // Stub exit + __ b(*stub->continuation()); } #undef __ -static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) { - const int nregs = 28; // Exclude FP, XZR, SP from calculation. 
- const int code_size = nregs * 254; // Rough estimate of code size - - ResourceMark rm; - - CodeBuffer buf(BufferBlob::create(label, code_size)); - StubCodeGenerator cgen(&buf); - - for (int i = 0; i < nregs; i++) { - const Register reg = as_Register(i); - stub[i] = generate_load_barrier_stub(&cgen, reg, decorators); - } -} - -void ZBarrierSetAssembler::barrier_stubs_init() { - barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub); - barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub); -} - -address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) { - return _load_barrier_slow_stub[reg->encoding()]; -} - -address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) { - return _load_barrier_weak_slow_stub[reg->encoding()]; -} +#endif // COMPILER2 diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp --- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -24,6 +24,12 @@ #ifndef CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP #define CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP +#include "code/vmreg.hpp" +#include "oops/accessDecorators.hpp" +#ifdef COMPILER2 +#include "opto/optoreg.hpp" +#endif // COMPILER2 + #ifdef COMPILER1 class LIR_Assembler; class LIR_OprDesc; @@ -32,14 +38,13 @@ class ZLoadBarrierStubC1; #endif // COMPILER1 +#ifdef COMPILER2 +class Node; +class ZLoadBarrierStubC2; +#endif // COMPILER2 + class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { -private: - address _load_barrier_slow_stub[RegisterImpl::number_of_registers]; - address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers]; - public: - ZBarrierSetAssembler(); - virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -83,10 +88,13 @@ DecoratorSet decorators) const; #endif // COMPILER1 - virtual void barrier_stubs_init(); +#ifdef COMPILER2 + OptoReg::Name refine_register(const Node* node, + OptoReg::Name opto_reg); - address load_barrier_slow_stub(Register reg); - address load_barrier_weak_slow_stub(Register reg); + void generate_c2_load_barrier_stub(MacroAssembler* masm, + ZLoadBarrierStubC2* stub) const; +#endif // COMPILER2 }; #endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad --- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad Wed Oct 16 15:31:05 2019 +0200 @@ -24,155 +24,244 @@ source_hpp %{ #include "gc/z/c2/zBarrierSetC2.hpp" +#include "gc/z/zThreadLocalData.hpp" %} source %{ -#include "gc/z/zBarrierSetAssembler.hpp" - -static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, - Register base, int index, int scale, - int disp, bool weak) { - const address stub = weak ? 
ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst) - : ZBarrierSet::assembler()->load_barrier_slow_stub(dst); +static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) { + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak); + __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); + __ andr(tmp, tmp, ref); + __ cbnz(tmp, *stub->entry()); + __ bind(*stub->continuation()); +} - if (index == -1) { - if (disp != 0) { - __ lea(dst, Address(base, disp)); - } else { - __ mov(dst, base); - } - } else { - Register index_reg = as_Register(index); - if (disp == 0) { - __ lea(dst, Address(base, index_reg, Address::lsl(scale))); - } else { - __ lea(dst, Address(base, disp)); - __ lea(dst, Address(dst, index_reg, Address::lsl(scale))); - } - } - - __ far_call(RuntimeAddress(stub)); +static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */); + __ b(*stub->entry()); + __ bind(*stub->continuation()); } %} -// -// Execute ZGC load barrier (strong) slow path -// -instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr, - vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4, - vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9, - vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14, - vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19, - vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24, - vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29, - vRegD_V30 v30, vRegD_V31 v31) %{ - match(Set dst (LoadBarrierSlowReg src dst)); - predicate(!n->as_LoadBarrierSlowReg()->is_weak()); +// Load Pointer +instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr) +%{ + match(Set dst (LoadP mem)); + predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierStrong)); + effect(TEMP dst, KILL cr); - effect(KILL cr, - KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7, - KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14, - KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21, - KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28, - KILL v29, KILL v30, KILL v31); + ins_cost(4 * INSN_COST); - format %{ "lea $dst, $src\n\t" - "call #ZLoadBarrierSlowPath" %} + format %{ "ldr $dst, $mem" %} ins_encode %{ - z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register, - $src$$index, $src$$scale, $src$$disp, false); + const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + __ ldr($dst$$Register, ref_addr); + if (barrier_data() != ZLoadBarrierElided) { + z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, false /* weak */); + } %} - ins_pipe(pipe_slow); + + ins_pipe(iload_reg_mem); %} -// -// Execute ZGC load barrier (weak) slow path -// -instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr, - vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4, - vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9, - vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14, - vRegD_V15 v15, vRegD_V16 v16, vRegD_V17 v17, vRegD_V18 v18, vRegD_V19 v19, - vRegD_V20 v20, 
vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24, - vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29, - vRegD_V30 v30, vRegD_V31 v31) %{ - match(Set dst (LoadBarrierSlowReg src dst)); - predicate(n->as_LoadBarrierSlowReg()->is_weak()); +// Load Weak Pointer +instruct zLoadWeakP(iRegPNoSp dst, memory mem, rFlagsReg cr) +%{ + match(Set dst (LoadP mem)); + predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierWeak)); + effect(TEMP dst, KILL cr); - effect(KILL cr, - KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7, - KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14, - KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21, - KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28, - KILL v29, KILL v30, KILL v31); + ins_cost(4 * INSN_COST); - format %{ "lea $dst, $src\n\t" - "call #ZLoadBarrierSlowPath" %} + format %{ "ldr $dst, $mem" %} ins_encode %{ - z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register, - $src$$index, $src$$scale, $src$$disp, true); + const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + __ ldr($dst$$Register, ref_addr); + z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, true /* weak */); %} - ins_pipe(pipe_slow); + + ins_pipe(iload_reg_mem); %} +// Load Pointer Volatile +instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr) +%{ + match(Set dst (LoadP mem)); + predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() == ZLoadBarrierStrong); + effect(TEMP dst, KILL cr); -// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed -// but doesn't affect output. 
+ ins_cost(VOLATILE_REF_COST); -instruct z_compareAndExchangeP(iRegPNoSp res, indirect mem, - iRegP oldval, iRegP newval, iRegP keepalive, - rFlagsReg cr) %{ - match(Set res (ZCompareAndExchangeP (Binary mem keepalive) (Binary oldval newval))); - ins_cost(2 * VOLATILE_REF_COST); - effect(TEMP_DEF res, KILL cr); - format %{ - "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval" + format %{ "ldar $dst, $mem\t" %} + + ins_encode %{ + __ ldar($dst$$Register, $mem$$Register); + if (barrier_data() != ZLoadBarrierElided) { + z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, false /* weak */); + } %} - ins_encode %{ - __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, - Assembler::xword, /*acquire*/ false, /*release*/ true, - /*weak*/ false, $res$$Register); - %} - ins_pipe(pipe_slow); + + ins_pipe(pipe_serial); %} -instruct z_compareAndSwapP(iRegINoSp res, - indirect mem, - iRegP oldval, iRegP newval, iRegP keepalive, - rFlagsReg cr) %{ - - match(Set res (ZCompareAndSwapP (Binary mem keepalive) (Binary oldval newval))); - match(Set res (ZWeakCompareAndSwapP (Binary mem keepalive) (Binary oldval newval))); +instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(KILL cr, TEMP_DEF res); ins_cost(2 * VOLATILE_REF_COST); - effect(KILL cr); + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} - format %{ - "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval" - "cset $res, EQ\t# $res <-- (EQ ? 
1 : 0)" - %} - - ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval), - aarch64_enc_cset_eq(res)); + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + if (barrier_data() != ZLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, rscratch2); + __ cbz(rscratch1, good); + z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + __ bind(good); + } + %} ins_pipe(pipe_slow); %} +instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)); + effect(KILL cr, TEMP_DEF res); -instruct z_get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev, - iRegP keepalive) %{ - match(Set prev (ZGetAndSetP mem (Binary newv keepalive))); + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $mem, $oldval, $newval\n\t" + "cset $res, EQ" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + if (barrier_data() != ZLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, rscratch2); + __ cbz(rscratch1, good); + z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ ); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, rscratch2); + __ cset($res$$Register, Assembler::EQ); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(TEMP_DEF res, KILL cr); ins_cost(2 * VOLATILE_REF_COST); - format %{ "atomic_xchg $prev, $newv, [$mem]" %} + + format %{ "cmpxchg $res = $mem, $oldval, $newval" %} + + ins_encode %{ + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, $res$$Register); + if (barrier_data() != ZLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, $res$$Register); + __ cbz(rscratch1, good); + z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); + __ 
cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + false /* acquire */, true /* release */, false /* weak */, $res$$Register); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + match(Set res (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(TEMP_DEF res, KILL cr); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "cmpxchg $res = $mem, $oldval, $newval" %} + ins_encode %{ - __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); + guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, $res$$Register); + if (barrier_data() != ZLoadBarrierElided) { + Label good; + __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset())); + __ andr(rscratch1, rscratch1, $res$$Register); + __ cbz(rscratch1, good); + z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */); + __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, + true /* acquire */, true /* release */, false /* weak */, $res$$Register); + __ bind(good); + } + %} + + ins_pipe(pipe_slow); +%} + +instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ + match(Set prev (GetAndSetP mem newv)); + predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(TEMP_DEF prev, KILL cr); + + ins_cost(2 * VOLATILE_REF_COST); + + format %{ "atomic_xchg $prev, $newv, [$mem]" %} + + ins_encode %{ + __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register); + if (barrier_data() != ZLoadBarrierElided) { + z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */); + } + %} + + ins_pipe(pipe_serial); +%} + +instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{ + match(Set prev (GetAndSetP mem newv)); + predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong)); + effect(TEMP_DEF prev, KILL cr); + + ins_cost(VOLATILE_REF_COST); + + format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %} + + ins_encode %{ + __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register); + if (barrier_data() != ZLoadBarrierElided) { + z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */); + } %} ins_pipe(pipe_serial); %} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -2132,6 +2132,65 @@ return count; } + +// Push lots of registers in the bit set supplied. Don't push sp. 
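+// (Illustrative example: a bitset of 0b0111 selects v0..v2; appending zr's
+// encoding pads an odd register count to an even one, so the stores can
+// always be emitted as full 128-bit stpq pairs.)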
+// Return the number of words pushed
+int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
+  int words_pushed = 0;
+
+  // Scan bitset to accumulate register pairs
+  unsigned char regs[32];
+  int count = 0;
+  for (int reg = 0; reg <= 31; reg++) {
+    if (1 & bitset)
+      regs[count++] = reg;
+    bitset >>= 1;
+  }
+  regs[count++] = zr->encoding_nocheck();
+  count &= ~1;  // Only push an even number of regs
+
+  // Always pushing full 128 bit registers.
+  if (count) {
+    stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
+    words_pushed += 2;
+  }
+  for (int i = 2; i < count; i += 2) {
+    stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+    words_pushed += 2;
+  }
+
+  assert(words_pushed == count, "oops, pushed != count");
+  return count;
+}
+
+int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
+  int words_pushed = 0;
+
+  // Scan bitset to accumulate register pairs
+  unsigned char regs[32];
+  int count = 0;
+  for (int reg = 0; reg <= 31; reg++) {
+    if (1 & bitset)
+      regs[count++] = reg;
+    bitset >>= 1;
+  }
+  regs[count++] = zr->encoding_nocheck();
+  count &= ~1;
+
+  for (int i = 2; i < count; i += 2) {
+    ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+    words_pushed += 2;
+  }
+  if (count) {
+    ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
+    words_pushed += 2;
+  }
+
+  assert(words_pushed == count, "oops, pushed != count");
+
+  return count;
+}
+
 #ifdef ASSERT
 void MacroAssembler::verify_heapbase(const char* msg) {
 #if 0
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -442,12 +442,18 @@
   int push(unsigned int bitset, Register stack);
   int pop(unsigned int bitset, Register stack);
+  int push_fp(unsigned int bitset, Register stack);
+  int pop_fp(unsigned int bitset, Register stack);
+
   void mov(Register dst, Address a);
 public:
   void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
   void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
+  void push_fp(RegSet regs, Register stack) { if (regs.bits()) push_fp(regs.bits(), stack); }
+  void pop_fp(RegSet regs, Register stack) { if (regs.bits()) pop_fp(regs.bits(), stack); }
+
   // Push and pop everything that might be clobbered by a native
   // runtime call except rscratch1 and rscratch2. (They are always
   // scratch, so we don't have to protect them.) Only save the lower
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -332,9 +332,14 @@
   // We use jump to self as the unresolved address which the inline
   // cache code (and relocs) know about
+  // As a special case we also use sequence movptr(r,0); br(r);
+  // i.e. jump to 0 when we need to leave space for a wide immediate
+  // load
-  // return -1 if jump to self
-  dest = (dest == (address) this) ?
(address) -1 : dest; + // return -1 if jump to self or to 0 + if ((dest == (address)this) || dest == 0) { + dest = (address) -1; + } return dest; } @@ -356,9 +361,13 @@ // We use jump to self as the unresolved address which the inline // cache code (and relocs) know about + // As a special case we also use jump to 0 when first generating + // a general jump - // return -1 if jump to self - dest = (dest == (address) this) ? (address) -1 : dest; + // return -1 if jump to self or to 0 + if ((dest == (address)this) || dest == 0) { + dest = (address) -1; + } return dest; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/register_aarch64.hpp --- a/src/hotspot/cpu/aarch64/register_aarch64.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/register_aarch64.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -230,6 +230,11 @@ return *this; } + RegSet &operator-=(const RegSet aSet) { + *this = *this - aSet; + return *this; + } + static RegSet of(Register r1) { return RegSet(r1); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -34,6 +34,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "oops/klass.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/arm/abstractInterpreter_arm.cpp --- a/src/hotspot/cpu/arm/abstractInterpreter_arm.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/arm/abstractInterpreter_arm.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -27,6 +27,7 @@ #include "interpreter/bytecode.hpp" #include "interpreter/interpreter.hpp" #include "oops/constMethod.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.hpp" #include "prims/methodHandles.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/arm/compiledIC_arm.cpp --- a/src/hotspot/cpu/arm/compiledIC_arm.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,16 +115,7 @@ // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - -#ifdef ASSERT - // read the value once - volatile intptr_t data = method_holder->data(); - volatile address destination = jump->jump_destination(); - assert(data == 0 || data == (intptr_t)callee(), - "a) MT-unsafe modification of inline cache"); - assert(destination == (address)-1 || destination == entry, - "b) MT-unsafe modification of inline cache"); -#endif + verify_mt_safe(callee, entry, method_holder, jump); // Update stub. 
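 // (set_data below stores the callee Method* into the stub's mov-constant;
 // verify_mt_safe above replaces the platform-local #ifdef ASSERT checks)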
method_holder->set_data((intptr_t)callee()); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/arm/sharedRuntime_arm.cpp --- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "oops/klass.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp --- a/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/ppc/abstractInterpreter_ppc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "interpreter/interpreter.hpp" #include "oops/constMethod.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.hpp" #include "runtime/frame.inline.hpp" #include "utilities/debug.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/ppc/compiledIC_ppc.cpp --- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -178,15 +178,7 @@ NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); -#ifdef ASSERT - // read the value once - volatile intptr_t data = method_holder->data(); - volatile address destination = jump->jump_destination(); - assert(data == 0 || data == (intptr_t)callee(), - "a) MT-unsafe modification of inline cache"); - assert(destination == (address)-1 || destination == entry, - "b) MT-unsafe modification of inline cache"); -#endif + verify_mt_safe(callee, entry, method_holder, jump); // Update stub. 
method_holder->set_data((intptr_t)callee()); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/ppc/macroAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "nativeInst_ppc.hpp" +#include "oops/klass.inline.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/icache.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -34,6 +34,7 @@ #include "interpreter/interp_masm.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "oops/klass.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/ppc/templateTable_ppc_64.cpp --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "interpreter/templateInterpreter.hpp" #include "interpreter/templateTable.hpp" #include "memory/universe.hpp" +#include "oops/klass.inline.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp --- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -30,6 +30,7 @@ #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "oops/instanceKlass.hpp" +#include "oops/klass.inline.hpp" #include "oops/klassVtable.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_ppc.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/s390/abstractInterpreter_s390.cpp --- a/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/s390/abstractInterpreter_s390.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "interpreter/interpreter.hpp" #include "oops/constMethod.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.hpp" #include "runtime/frame.inline.hpp" #include "utilities/debug.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/s390/compiledIC_s390.cpp --- a/src/hotspot/cpu/s390/compiledIC_s390.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -104,19 +104,7 @@ // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub()); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - -#ifdef ASSERT - // A generated lambda form might be deleted from the Lambdaform - // cache in MethodTypeForm. 
If a jit compiled lambdaform method - // becomes not entrant and the cache access returns null, the new - // resolve will lead to a new generated LambdaForm. - volatile intptr_t data = method_holder->data(); - volatile address destination = jump->jump_destination(); - assert(data == 0 || data == (intptr_t)callee() || callee->is_compiled_lambda_form(), - "a) MT-unsafe modification of inline cache"); - assert(destination == (address)-1 || destination == entry, - "b) MT-unsafe modification of inline cache"); -#endif + verify_mt_safe(callee, entry, method_holder, jump); // Update stub. method_holder->set_data((intptr_t)callee(), relocInfo::metadata_type); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/s390/sharedRuntime_s390.cpp --- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -33,6 +33,7 @@ #include "interpreter/interp_masm.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "oops/klass.inline.hpp" #include "registerSaver_s390.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/s390/vtableStubs_s390.cpp --- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -30,6 +30,7 @@ #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "oops/instanceKlass.hpp" +#include "oops/klass.inline.hpp" #include "oops/klassVtable.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_s390.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp --- a/src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/sparc/abstractInterpreter_sparc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "interpreter/interpreter.hpp" #include "oops/constMethod.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.hpp" #include "runtime/arguments.hpp" #include "runtime/frame.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/sparc/compiledIC_sparc.cpp --- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -104,16 +104,7 @@ // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - -#ifdef ASSERT - // read the value once - volatile intptr_t data = method_holder->data(); - volatile address destination = jump->jump_destination(); - assert(data == 0 || data == (intptr_t)callee(), - "a) MT-unsafe modification of inline cache"); - assert(destination == (address)-1 || destination == entry, - "b) MT-unsafe modification of inline cache"); -#endif + verify_mt_safe(callee, entry, method_holder, jump); // Update stub. 
method_holder->set_data((intptr_t)callee()); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp --- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "oops/klass.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/abstractInterpreter_x86.cpp --- a/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/abstractInterpreter_x86.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "ci/ciMethod.hpp" #include "interpreter/interpreter.hpp" +#include "oops/klass.inline.hpp" #include "runtime/frame.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/compiledIC_x86.cpp --- a/src/hotspot/cpu/x86/compiledIC_x86.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -157,16 +157,7 @@ // Creation also verifies the object. NativeMovConstReg* method_holder = nativeMovConstReg_at(stub); NativeJump* jump = nativeJump_at(method_holder->next_instruction_address()); - -#ifdef ASSERT - Method* old_method = reinterpret_cast(method_holder->data()); - address destination = jump->jump_destination(); - assert(old_method == NULL || old_method == callee() || - !old_method->method_holder()->is_loader_alive(), - "a) MT-unsafe modification of inline cache"); - assert(destination == (address)-1 || destination == entry, - "b) MT-unsafe modification of inline cache"); -#endif + verify_mt_safe(callee, entry, method_holder, jump); // Update stub. method_holder->set_data((intptr_t)callee()); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp --- a/src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/gc/z/zArguments_x86.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -23,20 +23,7 @@ #include "precompiled.hpp" #include "gc/z/zArguments.hpp" -#include "runtime/globals.hpp" -#include "runtime/globals_extension.hpp" -#include "utilities/debug.hpp" void ZArguments::initialize_platform() { -#ifdef COMPILER2 - // The C2 barrier slow path expects vector registers to be least - // 16 bytes wide, which is the minimum width available on all - // x86-64 systems. However, the user could have speficied a lower - // number on the command-line, in which case we print a warning - // and raise it to 16. 
- if (MaxVectorSize < 16) { - warning("ZGC requires MaxVectorSize to be at least 16"); - FLAG_SET_DEFAULT(MaxVectorSize, 16); - } -#endif + // Does nothing } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -24,22 +24,22 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "code/codeBlob.hpp" +#include "code/vmreg.inline.hpp" #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zBarrierSet.hpp" #include "gc/z/zBarrierSetAssembler.hpp" #include "gc/z/zBarrierSetRuntime.hpp" #include "memory/resourceArea.hpp" -#include "runtime/stubCodeGenerator.hpp" +#include "runtime/sharedRuntime.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 #include "c1/c1_LIRAssembler.hpp" #include "c1/c1_MacroAssembler.hpp" #include "gc/z/c1/zBarrierSetC1.hpp" #endif // COMPILER1 - -ZBarrierSetAssembler::ZBarrierSetAssembler() : - _load_barrier_slow_stub(), - _load_barrier_weak_slow_stub() {} +#ifdef COMPILER2 +#include "gc/z/c2/zBarrierSetC2.hpp" +#endif // COMPILER2 #ifdef PRODUCT #define BLOCK_COMMENT(str) /* nothing */ @@ -344,137 +344,327 @@ #endif // COMPILER1 +#ifdef COMPILER2 + +OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) { + if (!OptoReg::is_reg(opto_reg)) { + return OptoReg::Bad; + } + + const VMReg vm_reg = OptoReg::as_VMReg(opto_reg); + if (vm_reg->is_XMMRegister()) { + opto_reg &= ~15; + switch (node->ideal_reg()) { + case Op_VecX: + opto_reg |= 2; + break; + case Op_VecY: + opto_reg |= 4; + break; + case Op_VecZ: + opto_reg |= 8; + break; + default: + opto_reg |= 1; + break; + } + } + + return opto_reg; +} + +// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel +extern int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, + int stack_offset, int reg, uint ireg, outputStream* st); + #undef __ -#define __ cgen->assembler()-> +#define __ _masm-> + +class ZSaveLiveRegisters { +private: + struct XMMRegisterData { + XMMRegister _reg; + int _size; + + // Used by GrowableArray::find() + bool operator == (const XMMRegisterData& other) { + return _reg == other._reg; + } + }; + + MacroAssembler* const _masm; + GrowableArray _gp_registers; + GrowableArray _xmm_registers; + int _spill_size; + int _spill_offset; -// Generates a register specific stub for calling -// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or -// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(). -// -// The raddr register serves as both input and output for this stub. When the stub is -// called the raddr register contains the object field address (oop*) where the bad oop -// was loaded from, which caused the slow path to be taken. On return from the stub the -// raddr register contains the good/healed oop returned from -// ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded() or -// ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded(). -static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) { - // Don't generate stub for invalid registers - if (raddr == rsp || raddr == r15) { - return NULL; + static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) { + if (left->_size == right->_size) { + return 0; + } + + return (left->_size < right->_size) ? 
-1 : 1;
+  }
+
+  static int xmm_slot_size(OptoReg::Name opto_reg) {
+    // The low order 4 bits denote what size of the XMM register is live
+    return (opto_reg & 15) << 3;
+  }
+
+  static uint xmm_ideal_reg_for_size(int reg_size) {
+    switch (reg_size) {
+    case 8:
+      return Op_VecD;
+    case 16:
+      return Op_VecX;
+    case 32:
+      return Op_VecY;
+    case 64:
+      return Op_VecZ;
+    default:
+      fatal("Invalid register size %d", reg_size);
+      return 0;
+    }
+  }
+
+  bool xmm_needs_vzeroupper() const {
+    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
+  }
+
+  void xmm_register_save(const XMMRegisterData& reg_data) {
+    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
+    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
+    _spill_offset -= reg_data._size;
+    vec_spill_helper(__ code(), false /* do_size */, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+  }
+
+  void xmm_register_restore(const XMMRegisterData& reg_data) {
+    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
+    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
+    vec_spill_helper(__ code(), false /* do_size */, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+    _spill_offset += reg_data._size;
+  }
+
+  void gp_register_save(Register reg) {
+    _spill_offset -= 8;
+    __ movq(Address(rsp, _spill_offset), reg);
+  }
+
+  void gp_register_restore(Register reg) {
+    __ movq(reg, Address(rsp, _spill_offset));
+    _spill_offset += 8;
+  }
-  // Create stub name
-  char name[64];
-  const bool weak = (decorators & ON_WEAK_OOP_REF) != 0;
-  os::snprintf(name, sizeof(name), "zgc_load_barrier%s_stub_%s", weak ? "_weak" : "", raddr->name());
+  void initialize(ZLoadBarrierStubC2* stub) {
+    // Create mask of caller saved registers that need to
+    // be saved/restored if live
+    RegMask caller_saved;
+    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
+    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
+    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
-  __ align(CodeEntryAlignment);
-  StubCodeMark mark(cgen, "StubRoutines", os::strdup(name, mtCode));
-  address start = __ pc();
+    // Create mask of live registers
+    RegMask live = stub->live();
+    if (stub->tmp() != noreg) {
+      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
+    }
+
+    int gp_spill_size = 0;
+    int xmm_spill_size = 0;
+
+    // Record registers that need to be saved/restored
+    while (live.is_NotEmpty()) {
+      const OptoReg::Name opto_reg = live.find_first_elem();
+      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
+
+      live.Remove(opto_reg);
-  // Save live registers
-  if (raddr != rax) {
-    __ push(rax);
-  }
-  if (raddr != rcx) {
-    __ push(rcx);
-  }
-  if (raddr != rdx) {
-    __ push(rdx);
-  }
-  if (raddr != rsi) {
-    __ push(rsi);
-  }
-  if (raddr != rdi) {
-    __ push(rdi);
-  }
-  if (raddr != r8) {
-    __ push(r8);
-  }
-  if (raddr != r9) {
-    __ push(r9);
-  }
-  if (raddr != r10) {
-    __ push(r10);
-  }
-  if (raddr != r11) {
-    __ push(r11);
+      if (vm_reg->is_Register()) {
+        if (caller_saved.Member(opto_reg)) {
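+          // Live caller-saved GP register: record it; each one takes an 8-byte spill slot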
+          _gp_registers.append(vm_reg->as_Register());
+          gp_spill_size += 8;
+        }
+      } else if (vm_reg->is_XMMRegister()) {
+        // We encode in the low order 4 bits of the opto_reg how large a part of the register is live
+        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
+        const int reg_size = xmm_slot_size(opto_reg);
+        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
+        const int reg_index = _xmm_registers.find(reg_data);
+        if (reg_index == -1) {
+          // Not previously appended
+          _xmm_registers.append(reg_data);
+          xmm_spill_size += reg_size;
+        } else {
+          // Previously appended, update size
+          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
+          if (reg_size > reg_size_prev) {
+            _xmm_registers.at_put(reg_index, reg_data);
+            xmm_spill_size += reg_size - reg_size_prev;
+          }
+        }
+      } else {
+        fatal("Unexpected register type");
+      }
+    }
+
+    // Sort by size, largest first
+    _xmm_registers.sort(xmm_compare_register_size);
+
+    // Stack pointer must be 16 bytes aligned for the call
+    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
+  }
+
+public:
+  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
+      _masm(masm),
+      _gp_registers(),
+      _xmm_registers(),
+      _spill_size(0),
+      _spill_offset(0) {
-  // Setup arguments
-  if (raddr != c_rarg1) {
-    __ movq(c_rarg1, raddr);
-  }
-  __ movq(c_rarg0, Address(raddr, 0));
+
+    //
+    // Stack layout after registers have been spilled:
+    //
+    // | ...              | original rsp, 16 bytes aligned
+    // ------------------
+    // | zmm0 high        |
+    // | ...              |
+    // | zmm0 low         | 16 bytes aligned
+    // | ...              |
+    // | ymm1 high        |
+    // | ...              |
+    // | ymm1 low         | 16 bytes aligned
+    // | ...              |
+    // | xmmN high        |
+    // | ...              |
+    // | xmmN low         | 8 bytes aligned
+    // | reg0             | 8 bytes aligned
+    // | reg1             |
+    // | ...
| + // | regN | new rsp, if 16 bytes aligned + // | | else new rsp, 16 bytes aligned + // ------------------ + // - // Move result returned in rax to raddr, if needed - if (raddr != rax) { - __ movq(raddr, rax); + // Figure out what registers to save/restore + initialize(stub); + + // Allocate stack space + if (_spill_size > 0) { + __ subptr(rsp, _spill_size); + } + + // Save XMM/YMM/ZMM registers + for (int i = 0; i < _xmm_registers.length(); i++) { + xmm_register_save(_xmm_registers.at(i)); + } + + if (xmm_needs_vzeroupper()) { + __ vzeroupper(); + } + + // Save general purpose registers + for (int i = 0; i < _gp_registers.length(); i++) { + gp_register_save(_gp_registers.at(i)); + } } - // Restore saved registers - if (raddr != r11) { - __ pop(r11); - } - if (raddr != r10) { - __ pop(r10); - } - if (raddr != r9) { - __ pop(r9); - } - if (raddr != r8) { - __ pop(r8); + ~ZSaveLiveRegisters() { + // Restore general purpose registers + for (int i = _gp_registers.length() - 1; i >= 0; i--) { + gp_register_restore(_gp_registers.at(i)); + } + + __ vzeroupper(); + + // Restore XMM/YMM/ZMM registers + for (int i = _xmm_registers.length() - 1; i >= 0; i--) { + xmm_register_restore(_xmm_registers.at(i)); + } + + // Free stack space + if (_spill_size > 0) { + __ addptr(rsp, _spill_size); + } } - if (raddr != rdi) { - __ pop(rdi); - } - if (raddr != rsi) { - __ pop(rsi); - } - if (raddr != rdx) { - __ pop(rdx); - } - if (raddr != rcx) { - __ pop(rcx); - } - if (raddr != rax) { - __ pop(rax); +}; + +class ZSetupArguments { +private: + MacroAssembler* const _masm; + const Register _ref; + const Address _ref_addr; + +public: + ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) : + _masm(masm), + _ref(stub->ref()), + _ref_addr(stub->ref_addr()) { + + // Setup arguments + if (_ref_addr.base() == noreg) { + // No self healing + if (_ref != c_rarg0) { + __ movq(c_rarg0, _ref); + } + __ xorq(c_rarg1, c_rarg1); + } else { + // Self healing + if (_ref == c_rarg0) { + __ lea(c_rarg1, _ref_addr); + } else if (_ref != c_rarg1) { + __ lea(c_rarg1, _ref_addr); + __ movq(c_rarg0, _ref); + } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) { + __ movq(c_rarg0, _ref); + __ lea(c_rarg1, _ref_addr); + } else { + __ xchgq(c_rarg0, c_rarg1); + if (_ref_addr.base() == c_rarg0) { + __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp())); + } else if (_ref_addr.index() == c_rarg0) { + __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp())); + } else { + ShouldNotReachHere(); + } + } + } } - __ ret(0); + ~ZSetupArguments() { + // Transfer result + if (_ref != rax) { + __ movq(_ref, rax); + } + } +}; + +#undef __ +#define __ masm-> - return start; +void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const { + BLOCK_COMMENT("ZLoadBarrierStubC2"); + + // Stub entry + __ bind(*stub->entry()); + + { + ZSaveLiveRegisters save_live_registers(masm, stub); + ZSetupArguments setup_arguments(masm, stub); + __ call(RuntimeAddress(stub->slow_path())); + } + + // Stub exit + __ jmp(*stub->continuation()); } #undef __ -static void barrier_stubs_init_inner(const char* label, const DecoratorSet decorators, address* stub) { - const int nregs = RegisterImpl::number_of_registers; - const int code_size = nregs * 128; // Rough estimate of code size - - ResourceMark rm; - - CodeBuffer buf(BufferBlob::create(label, code_size)); - StubCodeGenerator cgen(&buf); - - for (int i = 0; i < nregs; i++) { 
- const Register reg = as_Register(i); - stub[i] = generate_load_barrier_stub(&cgen, reg, decorators); - } -} - -void ZBarrierSetAssembler::barrier_stubs_init() { - barrier_stubs_init_inner("zgc_load_barrier_stubs", ON_STRONG_OOP_REF, _load_barrier_slow_stub); - barrier_stubs_init_inner("zgc_load_barrier_weak_stubs", ON_WEAK_OOP_REF, _load_barrier_weak_slow_stub); -} - -address ZBarrierSetAssembler::load_barrier_slow_stub(Register reg) { - return _load_barrier_slow_stub[reg->encoding()]; -} - -address ZBarrierSetAssembler::load_barrier_weak_slow_stub(Register reg) { - return _load_barrier_weak_slow_stub[reg->encoding()]; -} +#endif // COMPILER2 diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp --- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -24,6 +24,14 @@ #ifndef CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP #define CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP +#include "code/vmreg.hpp" +#include "oops/accessDecorators.hpp" +#ifdef COMPILER2 +#include "opto/optoreg.hpp" +#endif // COMPILER2 + +class MacroAssembler; + #ifdef COMPILER1 class LIR_Assembler; class LIR_OprDesc; @@ -32,14 +40,13 @@ class ZLoadBarrierStubC1; #endif // COMPILER1 +#ifdef COMPILER2 +class Node; +class ZLoadBarrierStubC2; +#endif // COMPILER2 + class ZBarrierSetAssembler : public ZBarrierSetAssemblerBase { -private: - address _load_barrier_slow_stub[RegisterImpl::number_of_registers]; - address _load_barrier_weak_slow_stub[RegisterImpl::number_of_registers]; - public: - ZBarrierSetAssembler(); - virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, @@ -82,10 +89,13 @@ DecoratorSet decorators) const; #endif // COMPILER1 - virtual void barrier_stubs_init(); +#ifdef COMPILER2 + OptoReg::Name refine_register(const Node* node, + OptoReg::Name opto_reg); - address load_barrier_slow_stub(Register reg); - address load_barrier_weak_slow_stub(Register reg); + void generate_c2_load_barrier_stub(MacroAssembler* masm, + ZLoadBarrierStubC2* stub) const; +#endif // COMPILER2 }; #endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/gc/z/z_x86_64.ad --- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad Wed Oct 16 15:31:05 2019 +0200 @@ -24,190 +24,144 @@ source_hpp %{ #include "gc/z/c2/zBarrierSetC2.hpp" +#include "gc/z/zThreadLocalData.hpp" %} source %{ -#include "gc/z/zBarrierSetAssembler.hpp" +static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) { + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak); + __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); + __ jcc(Assembler::notZero, *stub->entry()); + __ bind(*stub->continuation()); +} -static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) { - assert(dst != rsp, "Invalid register"); - assert(dst != r15, "Invalid register"); - - const address stub = weak ? 
ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst) - : ZBarrierSet::assembler()->load_barrier_slow_stub(dst); - __ lea(dst, src); - __ call(RuntimeAddress(stub)); +static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) { + ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */); + __ jmp(*stub->entry()); + __ bind(*stub->continuation()); } %} -// For XMM and YMM enabled processors -instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr, - rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3, - rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7, - rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, - rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{ - match(Set dst (LoadBarrierSlowReg src dst)); - predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak()); +// Load Pointer +instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr) +%{ + predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierStrong); + match(Set dst (LoadP mem)); + effect(KILL cr, TEMP dst); - effect(KILL cr, - KILL x0, KILL x1, KILL x2, KILL x3, - KILL x4, KILL x5, KILL x6, KILL x7, - KILL x8, KILL x9, KILL x10, KILL x11, - KILL x12, KILL x13, KILL x14, KILL x15); + ins_cost(125); - format %{ "lea $dst, $src\n\t" - "call #ZLoadBarrierSlowPath" %} + format %{ "movq $dst, $mem" %} ins_encode %{ - z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */); + __ movptr($dst$$Register, $mem$$Address); + if (barrier_data() != ZLoadBarrierElided) { + z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, false /* weak */); + } %} - ins_pipe(pipe_slow); + + ins_pipe(ialu_reg_mem); %} -// For ZMM enabled processors -instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr, - rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3, - rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7, - rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, - rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15, - rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19, - rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23, - rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27, - rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{ +// Load Weak Pointer +instruct zLoadWeakP(rRegP dst, memory mem, rFlagsReg cr) +%{ + predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierWeak); + match(Set dst (LoadP mem)); + effect(KILL cr, TEMP dst); - match(Set dst (LoadBarrierSlowReg src dst)); - predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak()); + ins_cost(125); - effect(KILL cr, - KILL x0, KILL x1, KILL x2, KILL x3, - KILL x4, KILL x5, KILL x6, KILL x7, - KILL x8, KILL x9, KILL x10, KILL x11, - KILL x12, KILL x13, KILL x14, KILL x15, - KILL x16, KILL x17, KILL x18, KILL x19, - KILL x20, KILL x21, KILL x22, KILL x23, - KILL x24, KILL x25, KILL x26, KILL x27, - KILL x28, KILL x29, KILL x30, KILL x31); - - format %{ "lea $dst, $src\n\t" - "call #ZLoadBarrierSlowPath" %} + format %{ "movq $dst, $mem" %} ins_encode %{ - z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */); + __ movptr($dst$$Register, $mem$$Address); + z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, true /* weak */); %} - ins_pipe(pipe_slow); -%} - -// For XMM and YMM enabled processors -instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr, - rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3, - rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7, - rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, - rxmm12 x12, rxmm13 
x13, rxmm14 x14, rxmm15 x15) %{ - match(Set dst (LoadBarrierSlowReg src dst)); - predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak()); - effect(KILL cr, - KILL x0, KILL x1, KILL x2, KILL x3, - KILL x4, KILL x5, KILL x6, KILL x7, - KILL x8, KILL x9, KILL x10, KILL x11, - KILL x12, KILL x13, KILL x14, KILL x15); - - format %{ "lea $dst, $src\n\t" - "call #ZLoadBarrierSlowPath" %} - - ins_encode %{ - z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */); - %} - ins_pipe(pipe_slow); + ins_pipe(ialu_reg_mem); %} -// For ZMM enabled processors -instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr, - rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3, - rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7, - rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, - rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15, - rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19, - rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23, - rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27, - rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{ +instruct zCompareAndExchangeP(memory mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{ + match(Set oldval (CompareAndExchangeP mem (Binary oldval newval))); + predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(KILL cr, TEMP tmp); - match(Set dst (LoadBarrierSlowReg src dst)); - predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak()); - - effect(KILL cr, - KILL x0, KILL x1, KILL x2, KILL x3, - KILL x4, KILL x5, KILL x6, KILL x7, - KILL x8, KILL x9, KILL x10, KILL x11, - KILL x12, KILL x13, KILL x14, KILL x15, - KILL x16, KILL x17, KILL x18, KILL x19, - KILL x20, KILL x21, KILL x22, KILL x23, - KILL x24, KILL x25, KILL x26, KILL x27, - KILL x28, KILL x29, KILL x30, KILL x31); - - format %{ "lea $dst, $src\n\t" - "call #ZLoadBarrierSlowPath" %} + format %{ "lock\n\t" + "cmpxchgq $newval, $mem" %} ins_encode %{ - z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */); + if (barrier_data() != ZLoadBarrierElided) { + __ movptr($tmp$$Register, $oldval$$Register); + } + __ lock(); + __ cmpxchgptr($newval$$Register, $mem$$Address); + if (barrier_data() != ZLoadBarrierElided) { + Label good; + __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); + __ jcc(Assembler::zero, good); + z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register); + __ movptr($oldval$$Register, $tmp$$Register); + __ lock(); + __ cmpxchgptr($newval$$Register, $mem$$Address); + __ bind(good); + } %} - ins_pipe(pipe_slow); + + ins_pipe(pipe_cmpxchg); %} -// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed -// but doesn't affect output. 
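+// The cmpxchg-based encodings below share one shape: run the cmpxchg and,
+// if the loaded oop has bad color bits (tested against the thread-local
+// address_bad_mask), take the slow-path stub to heal the field, then retry
+// the cmpxchg once. A sketch of the assumed semantics, with illustrative
+// names (bad_mask, heal) rather than real HotSpot APIs:
+//
+//   intptr_t cas_oop(volatile intptr_t* addr, intptr_t expected, intptr_t desired) {
+//     intptr_t old = cmpxchg(addr, expected, desired);  // first attempt
+//     if ((old & bad_mask) != 0) {                      // stale color, not a true mismatch
+//       heal(addr, old);                                // slow path updates *addr in place
+//       old = cmpxchg(addr, expected, desired);         // single retry
+//     }
+//     return old;
+//   }
+//
+// (zXChgP below needs no retry: the exchange always succeeds and the load
+// barrier is simply applied to the returned oop.)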
+instruct zCompareAndSwapP(rRegI res, memory mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{ + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); + predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(KILL cr, KILL oldval, TEMP tmp); + + format %{ "lock\n\t" + "cmpxchgq $newval, $mem\n\t" + "sete $res\n\t" + "movzbl $res, $res" %} -instruct z_compareAndExchangeP( - memory mem_ptr, - rax_RegP oldval, rRegP newval, rRegP keepalive, - rFlagsReg cr) %{ - predicate(VM_Version::supports_cx8()); - match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval))); - effect(KILL cr); + ins_encode %{ + if (barrier_data() != ZLoadBarrierElided) { + __ movptr($tmp$$Register, $oldval$$Register); + } + __ lock(); + __ cmpxchgptr($newval$$Register, $mem$$Address); + if (barrier_data() != ZLoadBarrierElided) { + Label good; + __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); + __ jcc(Assembler::zero, good); + z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register); + __ movptr($oldval$$Register, $tmp$$Register); + __ lock(); + __ cmpxchgptr($newval$$Register, $mem$$Address); + __ bind(good); + __ cmpptr($tmp$$Register, $oldval$$Register); + } + __ setb(Assembler::equal, $res$$Register); + __ movzbl($res$$Register, $res$$Register); + %} - format %{ "cmpxchgq $mem_ptr,$newval\t# " - "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %} - opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, mem_ptr), - OpcP, OpcS, - reg_mem(newval, mem_ptr) // lock cmpxchg - ); - ins_pipe( pipe_cmpxchg ); + ins_pipe(pipe_cmpxchg); %} -instruct z_compareAndSwapP(rRegI res, - memory mem_ptr, - rax_RegP oldval, rRegP newval, rRegP keepalive, - rFlagsReg cr) %{ - predicate(VM_Version::supports_cx8()); - match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval))); - match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval))); - effect(KILL cr, KILL oldval); +instruct zXChgP(memory mem, rRegP newval, rFlagsReg cr) %{ + match(Set newval (GetAndSetP mem newval)); + predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong); + effect(KILL cr); + + format %{ "xchgq $newval, $mem" %} - format %{ "cmpxchgq $mem_ptr,$newval\t# " - "If rax == $mem_ptr then store $newval into $mem_ptr\n\t" - "sete $res\n\t" - "movzbl $res, $res" %} - opcode(0x0F, 0xB1); - ins_encode(lock_prefix, - REX_reg_mem_wide(newval, mem_ptr), - OpcP, OpcS, - reg_mem(newval, mem_ptr), - REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete - REX_reg_breg(res, res), // movzbl - Opcode(0xF), Opcode(0xB6), reg_reg(res, res)); - ins_pipe( pipe_cmpxchg ); + ins_encode %{ + __ xchgptr($newval$$Register, $mem$$Address); + if (barrier_data() != ZLoadBarrierElided) { + z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, false /* weak */); + } + %} + + ins_pipe(pipe_cmpxchg); %} - -instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{ - match(Set newval (ZGetAndSetP mem (Binary newval keepalive))); - format %{ "XCHGQ $newval,[$mem]" %} - ins_encode %{ - __ xchgq($newval$$Register, $mem$$Address); - %} - ins_pipe( pipe_cmpxchg ); -%} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Mon Oct 07 16:48:42 2019 +0200 +++ 
b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -824,11 +824,13 @@ } void MacroAssembler::stop(const char* msg) { - address rip = pc(); - pusha(); // get regs on stack + if (ShowMessageBoxOnError) { + address rip = pc(); + pusha(); // get regs on stack + lea(c_rarg1, InternalAddress(rip)); + movq(c_rarg2, rsp); // pass pointer to regs array + } lea(c_rarg0, ExternalAddress((address) msg)); - lea(c_rarg1, InternalAddress(rip)); - movq(c_rarg2, rsp); // pass pointer to regs array andq(rsp, -16); // align stack as required by ABI call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); hlt(); @@ -6350,7 +6352,7 @@ movptr(result, str1); if (UseAVX >= 2) { cmpl(cnt1, stride); - jcc(Assembler::less, SCAN_TO_CHAR_LOOP); + jcc(Assembler::less, SCAN_TO_CHAR); cmpl(cnt1, 2*stride); jcc(Assembler::less, SCAN_TO_8_CHAR_INIT); movdl(vec1, ch); @@ -6377,10 +6379,8 @@ } bind(SCAN_TO_8_CHAR); cmpl(cnt1, stride); - if (UseAVX >= 2) { - jcc(Assembler::less, SCAN_TO_CHAR); - } else { - jcc(Assembler::less, SCAN_TO_CHAR_LOOP); + jcc(Assembler::less, SCAN_TO_CHAR); + if (UseAVX < 2) { movdl(vec1, ch); pshuflw(vec1, vec1, 0x00); pshufd(vec1, vec1, 0); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -34,6 +34,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "oops/klass.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" @@ -1303,6 +1304,97 @@ } } +// Registers need to be saved for runtime call +static Register caller_saved_registers[] = { + rcx, rdx, rsi, rdi +}; + +// Save caller saved registers except r1 and r2 +static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) { + int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register)); + for (int index = 0; index < reg_len; index ++) { + Register this_reg = caller_saved_registers[index]; + if (this_reg != r1 && this_reg != r2) { + __ push(this_reg); + } + } +} + +// Restore caller saved registers except r1 and r2 +static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) { + int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register)); + for (int index = reg_len - 1; index >= 0; index --) { + Register this_reg = caller_saved_registers[index]; + if (this_reg != r1 && this_reg != r2) { + __ pop(this_reg); + } + } +} + +// Pin object, return pinned object or null in rax +static void gen_pin_object(MacroAssembler* masm, + Register thread, VMRegPair reg) { + __ block_comment("gen_pin_object {"); + + Label is_null; + Register tmp_reg = rax; + VMRegPair tmp(tmp_reg->as_VMReg()); + if (reg.first()->is_stack()) { + // Load the arg up from the stack + simple_move32(masm, reg, tmp); + reg = tmp; + } else { + __ movl(tmp_reg, reg.first()->as_Register()); + } + __ testptr(reg.first()->as_Register(), reg.first()->as_Register()); + __ jccb(Assembler::equal, is_null); + + // Save registers that may be used by runtime call + Register arg = reg.first()->is_Register() ? 
reg.first()->as_Register() : noreg; + save_registers_except(masm, arg, thread); + + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::pin_object), + thread, reg.first()->as_Register()); + + // Restore saved registers + restore_registers_except(masm, arg, thread); + + __ bind(is_null); + __ block_comment("} gen_pin_object"); +} + +// Unpin object +static void gen_unpin_object(MacroAssembler* masm, + Register thread, VMRegPair reg) { + __ block_comment("gen_unpin_object {"); + Label is_null; + + // temp register + __ push(rax); + Register tmp_reg = rax; + VMRegPair tmp(tmp_reg->as_VMReg()); + + simple_move32(masm, reg, tmp); + + __ testptr(rax, rax); + __ jccb(Assembler::equal, is_null); + + // Save registers that may be used by runtime call + Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg; + save_registers_except(masm, arg, thread); + + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object), + thread, rax); + + // Restore saved registers + restore_registers_except(masm, arg, thread); + __ bind(is_null); + __ pop(rax); + __ block_comment("} gen_unpin_object"); +} + // Check GCLocker::needs_gc and enter the runtime if it's true. This // keeps a new JNI critical region from starting until a GC has been // forced. Save down any oops in registers and describe them in an @@ -1836,7 +1928,7 @@ __ get_thread(thread); - if (is_critical_native) { + if (is_critical_native && !Universe::heap()->supports_object_pinning()) { check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args, oop_handle_offset, oop_maps, in_regs, in_sig_bt); } @@ -1874,6 +1966,11 @@ // OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); + // Inbound arguments that need to be pinned for critical natives + GrowableArray pinned_args(total_in_args); + // Current stack slot for storing register based array argument + int pinned_slot = oop_handle_offset; + // Mark location of rbp, // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg()); @@ -1885,7 +1982,28 @@ switch (in_sig_bt[i]) { case T_ARRAY: if (is_critical_native) { - unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); + VMRegPair in_arg = in_regs[i]; + if (Universe::heap()->supports_object_pinning()) { + // gen_pin_object handles save and restore + // of any clobbered registers + gen_pin_object(masm, thread, in_arg); + pinned_args.append(i); + + // rax has pinned array + VMRegPair result_reg(rax->as_VMReg()); + if (!in_arg.first()->is_stack()) { + assert(pinned_slot <= stack_slots, "overflow"); + simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot)); + pinned_slot += VMRegImpl::slots_per_word; + } else { + // Write back pinned value, it will be used to unpin this argument + __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register()); + } + // We have the array in register, use it + in_arg = result_reg; + } + + unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); c_arg++; break; } @@ -2078,6 +2196,26 @@ default : ShouldNotReachHere(); } + // unpin pinned arguments + pinned_slot = oop_handle_offset; + if (pinned_args.length() > 0) { + // save return value that may be overwritten otherwise. 
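+    // (save_native_result/restore_native_result preserve the return
+    // registers selected by ret_type while the unpin calls run)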
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index ++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      if (!in_regs[i].first()->is_stack()) {
+        int offset = pinned_slot * VMRegImpl::stack_slot_size;
+        __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+        pinned_slot += VMRegImpl::slots_per_word;
+      }
+      // gen_unpin_object handles save and restore
+      // of any other clobbered registers
+      gen_unpin_object(masm, thread, in_regs[i]);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -41,6 +41,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/x86.ad
--- a/src/hotspot/cpu/x86/x86.ad Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/cpu/x86/x86.ad Wed Oct 16 15:31:05 2019 +0200
@@ -1097,138 +1097,6 @@
 reg_class_dynamic vectorz_reg(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() %} );
 reg_class_dynamic vectorz_reg_vl(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} );

-reg_class xmm0_reg(XMM0, XMM0b, XMM0c, XMM0d);
-reg_class ymm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h);
-reg_class zmm0_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p);
-
-reg_class xmm1_reg(XMM1, XMM1b, XMM1c, XMM1d);
-reg_class ymm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h);
-reg_class zmm1_reg(XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p);
-
-reg_class xmm2_reg(XMM2, XMM2b, XMM2c, XMM2d);
-reg_class ymm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h);
-reg_class zmm2_reg(XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p);
-
-reg_class xmm3_reg(XMM3, XMM3b, XMM3c, XMM3d);
-reg_class ymm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h);
-reg_class zmm3_reg(XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p);
-
-reg_class xmm4_reg(XMM4, XMM4b, XMM4c, XMM4d);
-reg_class ymm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h);
-reg_class zmm4_reg(XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p);
-
-reg_class xmm5_reg(XMM5, XMM5b, XMM5c, XMM5d);
-reg_class ymm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h);
-reg_class zmm5_reg(XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p);
-
-reg_class xmm6_reg(XMM6, XMM6b, XMM6c, XMM6d);
-reg_class ymm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h);
-reg_class zmm6_reg(XMM6, XMM6b, XMM6c, XMM6d, XMM6e,
XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p); - -reg_class xmm7_reg(XMM7, XMM7b, XMM7c, XMM7d); -reg_class ymm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h); -reg_class zmm7_reg(XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p); - -#ifdef _LP64 - -reg_class xmm8_reg(XMM8, XMM8b, XMM8c, XMM8d); -reg_class ymm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h); -reg_class zmm8_reg(XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p); - -reg_class xmm9_reg(XMM9, XMM9b, XMM9c, XMM9d); -reg_class ymm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h); -reg_class zmm9_reg(XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p); - -reg_class xmm10_reg(XMM10, XMM10b, XMM10c, XMM10d); -reg_class ymm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h); -reg_class zmm10_reg(XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p); - -reg_class xmm11_reg(XMM11, XMM11b, XMM11c, XMM11d); -reg_class ymm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h); -reg_class zmm11_reg(XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p); - -reg_class xmm12_reg(XMM12, XMM12b, XMM12c, XMM12d); -reg_class ymm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h); -reg_class zmm12_reg(XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p); - -reg_class xmm13_reg(XMM13, XMM13b, XMM13c, XMM13d); -reg_class ymm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h); -reg_class zmm13_reg(XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p); - -reg_class xmm14_reg(XMM14, XMM14b, XMM14c, XMM14d); -reg_class ymm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h); -reg_class zmm14_reg(XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p); - -reg_class xmm15_reg(XMM15, XMM15b, XMM15c, XMM15d); -reg_class ymm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h); -reg_class zmm15_reg(XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p); - -reg_class xmm16_reg(XMM16, XMM16b, XMM16c, XMM16d); -reg_class ymm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h); -reg_class zmm16_reg(XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p); - -reg_class xmm17_reg(XMM17, XMM17b, XMM17c, XMM17d); -reg_class ymm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h); -reg_class zmm17_reg(XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p); - -reg_class xmm18_reg(XMM18, XMM18b, XMM18c, XMM18d); -reg_class ymm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h); -reg_class zmm18_reg(XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p); - -reg_class xmm19_reg(XMM19, XMM19b, XMM19c, XMM19d); -reg_class ymm19_reg(XMM19, 
XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h); -reg_class zmm19_reg(XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p); - -reg_class xmm20_reg(XMM20, XMM20b, XMM20c, XMM20d); -reg_class ymm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h); -reg_class zmm20_reg(XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p); - -reg_class xmm21_reg(XMM21, XMM21b, XMM21c, XMM21d); -reg_class ymm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h); -reg_class zmm21_reg(XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p); - -reg_class xmm22_reg(XMM22, XMM22b, XMM22c, XMM22d); -reg_class ymm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h); -reg_class zmm22_reg(XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p); - -reg_class xmm23_reg(XMM23, XMM23b, XMM23c, XMM23d); -reg_class ymm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h); -reg_class zmm23_reg(XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p); - -reg_class xmm24_reg(XMM24, XMM24b, XMM24c, XMM24d); -reg_class ymm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h); -reg_class zmm24_reg(XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p); - -reg_class xmm25_reg(XMM25, XMM25b, XMM25c, XMM25d); -reg_class ymm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h); -reg_class zmm25_reg(XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p); - -reg_class xmm26_reg(XMM26, XMM26b, XMM26c, XMM26d); -reg_class ymm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h); -reg_class zmm26_reg(XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p); - -reg_class xmm27_reg(XMM27, XMM27b, XMM27c, XMM27d); -reg_class ymm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h); -reg_class zmm27_reg(XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p); - -reg_class xmm28_reg(XMM28, XMM28b, XMM28c, XMM28d); -reg_class ymm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h); -reg_class zmm28_reg(XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p); - -reg_class xmm29_reg(XMM29, XMM29b, XMM29c, XMM29d); -reg_class ymm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h); -reg_class zmm29_reg(XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p); - -reg_class xmm30_reg(XMM30, XMM30b, XMM30c, XMM30d); -reg_class ymm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h); -reg_class zmm30_reg(XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p); - -reg_class xmm31_reg(XMM31, XMM31b, XMM31c, XMM31d); -reg_class ymm31_reg(XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h); -reg_class zmm31_reg(XMM31, 
XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p); - -#endif - %} @@ -1412,7 +1280,7 @@ case Op_AbsVS: case Op_AbsVI: case Op_AddReductionVI: - if (UseSSE < 3) // requires at least SSE3 + if (UseSSE < 3 || !VM_Version::supports_ssse3()) // requires at least SSSE3 ret_value = false; break; case Op_MulReductionVI: @@ -1800,8 +1668,8 @@ return (UseAVX > 2) ? 6 : 4; } -static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, - int stack_offset, int reg, uint ireg, outputStream* st) { +int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, + int stack_offset, int reg, uint ireg, outputStream* st) { // In 64-bit VM size calculation is very complex. Emitting instructions // into scratch buffer is used to get size in 64-bit VM. LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/cpu/x86/x86_64.ad --- a/src/hotspot/cpu/x86/x86_64.ad Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/cpu/x86/x86_64.ad Wed Oct 16 15:31:05 2019 +0200 @@ -1058,8 +1058,8 @@ static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, int src_hi, int dst_hi, uint ireg, outputStream* st); -static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, - int stack_offset, int reg, uint ireg, outputStream* st); +int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, + int stack_offset, int reg, uint ireg, outputStream* st); static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset, int dst_offset, uint ireg, outputStream* st) { @@ -4260,200 +4260,6 @@ %} %} -// Operands for bound floating pointer register arguments -operand rxmm0() %{ - constraint(ALLOC_IN_RC(xmm0_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm1() %{ - constraint(ALLOC_IN_RC(xmm1_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm2() %{ - constraint(ALLOC_IN_RC(xmm2_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm3() %{ - constraint(ALLOC_IN_RC(xmm3_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm4() %{ - constraint(ALLOC_IN_RC(xmm4_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm5() %{ - constraint(ALLOC_IN_RC(xmm5_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm6() %{ - constraint(ALLOC_IN_RC(xmm6_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm7() %{ - constraint(ALLOC_IN_RC(xmm7_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm8() %{ - constraint(ALLOC_IN_RC(xmm8_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm9() %{ - constraint(ALLOC_IN_RC(xmm9_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm10() %{ - constraint(ALLOC_IN_RC(xmm10_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm11() %{ - constraint(ALLOC_IN_RC(xmm11_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm12() %{ - constraint(ALLOC_IN_RC(xmm12_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm13() %{ - constraint(ALLOC_IN_RC(xmm13_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm14() %{ - constraint(ALLOC_IN_RC(xmm14_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm15() %{ - constraint(ALLOC_IN_RC(xmm15_reg)); - match(VecX); - format%{%} - interface(REG_INTER); 
-%} -operand rxmm16() %{ - constraint(ALLOC_IN_RC(xmm16_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm17() %{ - constraint(ALLOC_IN_RC(xmm17_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm18() %{ - constraint(ALLOC_IN_RC(xmm18_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm19() %{ - constraint(ALLOC_IN_RC(xmm19_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm20() %{ - constraint(ALLOC_IN_RC(xmm20_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm21() %{ - constraint(ALLOC_IN_RC(xmm21_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm22() %{ - constraint(ALLOC_IN_RC(xmm22_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm23() %{ - constraint(ALLOC_IN_RC(xmm23_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm24() %{ - constraint(ALLOC_IN_RC(xmm24_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm25() %{ - constraint(ALLOC_IN_RC(xmm25_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm26() %{ - constraint(ALLOC_IN_RC(xmm26_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm27() %{ - constraint(ALLOC_IN_RC(xmm27_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm28() %{ - constraint(ALLOC_IN_RC(xmm28_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm29() %{ - constraint(ALLOC_IN_RC(xmm29_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm30() %{ - constraint(ALLOC_IN_RC(xmm30_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} -operand rxmm31() %{ - constraint(ALLOC_IN_RC(xmm31_reg)); - match(VecX); - format%{%} - interface(REG_INTER); -%} - //----------OPERAND CLASSES---------------------------------------------------- // Operand Classes are groups of operands that are used as to simplify // instruction definitions by not requiring the AD writer to specify separate @@ -5346,6 +5152,7 @@ instruct loadP(rRegP dst, memory mem) %{ match(Set dst (LoadP mem)); + predicate(n->as_Load()->barrier_data() == 0); ins_cost(125); // XXX format %{ "movq $dst, $mem\t# ptr" %} @@ -7794,6 +7601,7 @@ rax_RegP oldval, rRegP newval, rFlagsReg cr) %{ + predicate(n->as_LoadStore()->barrier_data() == 0); match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval))); format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) " @@ -7845,7 +7653,7 @@ rax_RegP oldval, rRegP newval, rFlagsReg cr) %{ - predicate(VM_Version::supports_cx8()); + predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0); match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval))); effect(KILL cr, KILL oldval); @@ -8087,7 +7895,7 @@ rax_RegP oldval, rRegP newval, rFlagsReg cr) %{ - predicate(VM_Version::supports_cx8()); + predicate(VM_Version::supports_cx8() && n->as_LoadStore()->barrier_data() == 0); match(Set oldval (CompareAndExchangeP mem_ptr (Binary oldval newval))); effect(KILL cr); @@ -8232,6 +8040,7 @@ instruct xchgP( memory mem, rRegP newval) %{ match(Set newval (GetAndSetP mem newval)); + predicate(n->as_LoadStore()->barrier_data() == 0); format %{ "XCHGQ $newval,[$mem]" %} ins_encode %{ __ xchgq($newval$$Register, $mem$$Address); @@ -11974,6 +11783,7 @@ instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2) %{ match(Set cr (CmpP op1 (LoadP op2))); + 
predicate(n->in(2)->as_Load()->barrier_data() == 0); ins_cost(500); // XXX format %{ "cmpq $op1, $op2\t# ptr" %} @@ -11999,7 +11809,8 @@ // and raw pointers have no anti-dependencies. instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2) %{ - predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none); + predicate(n->in(2)->in(2)->bottom_type()->reloc() == relocInfo::none && + n->in(2)->as_Load()->barrier_data() == 0); match(Set cr (CmpP op1 (LoadP op2))); format %{ "cmpq $op1, $op2\t# raw ptr" %} @@ -12024,7 +11835,8 @@ // any compare to a zero should be eq/neq. instruct testP_mem(rFlagsReg cr, memory op, immP0 zero) %{ - predicate(!UseCompressedOops || (CompressedOops::base() != NULL)); + predicate((!UseCompressedOops || (CompressedOops::base() != NULL)) && + n->in(1)->as_Load()->barrier_data() == 0); match(Set cr (CmpP (LoadP op) zero)); ins_cost(500); // XXX @@ -12037,7 +11849,9 @@ instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero) %{ - predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL)); + predicate(UseCompressedOops && (CompressedOops::base() == NULL) && + (CompressedKlassPointers::base() == NULL) && + n->in(1)->as_Load()->barrier_data() == 0); match(Set cr (CmpP (LoadP mem) zero)); format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/os/aix/os_aix.cpp --- a/src/hotspot/os/aix/os_aix.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/os/aix/os_aix.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -132,18 +132,6 @@ #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103 // excerpts from systemcfg.h that might be missing on older os levels -#ifndef PV_5_Compat - #define PV_5_Compat 0x0F8000 /* Power PC 5 */ -#endif -#ifndef PV_6 - #define PV_6 0x100000 /* Power PC 6 */ -#endif -#ifndef PV_6_1 - #define PV_6_1 0x100001 /* Power PC 6 DD1.x */ -#endif -#ifndef PV_6_Compat - #define PV_6_Compat 0x108000 /* Power PC 6 */ -#endif #ifndef PV_7 #define PV_7 0x200000 /* Power PC 7 */ #endif @@ -156,6 +144,13 @@ #ifndef PV_8_Compat #define PV_8_Compat 0x308000 /* Power PC 8 */ #endif +#ifndef PV_9 + #define PV_9 0x400000 /* Power PC 9 */ +#endif +#ifndef PV_9_Compat + #define PV_9_Compat 0x408000 /* Power PC 9 */ +#endif + static address resolve_function_descriptor_to_code_pointer(address p); @@ -1386,15 +1381,7 @@ void os::print_os_info(outputStream* st) { st->print("OS:"); - st->print("uname:"); - struct utsname name; - uname(&name); - st->print(name.sysname); st->print(" "); - st->print(name.nodename); st->print(" "); - st->print(name.release); st->print(" "); - st->print(name.version); st->print(" "); - st->print(name.machine); - st->cr(); + os::Posix::print_uname_info(st); uint32_t ver = os::Aix::os_version(); st->print_cr("AIX kernel version %u.%u.%u.%u", @@ -1402,16 +1389,12 @@ os::Posix::print_rlimit_info(st); + os::Posix::print_load_average(st); + // _SC_THREAD_THREADS_MAX is the maximum number of threads within a process. 
long tmax = sysconf(_SC_THREAD_THREADS_MAX); st->print_cr("maximum #threads within a process:%ld", tmax); - // load average - st->print("load average:"); - double loadavg[3] = {-1.L, -1.L, -1.L}; - os::loadavg(loadavg, 3); - st->print_cr("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); - // print wpar info libperfstat::wparinfo_t wi; if (libperfstat::get_wparinfo(&wi)) { @@ -1504,6 +1487,9 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) { // read _system_configuration.version switch (_system_configuration.version) { + case PV_9: + strncpy(buf, "Power PC 9", buflen); + break; case PV_8: strncpy(buf, "Power PC 8", buflen); break; @@ -1537,6 +1523,9 @@ case PV_8_Compat: strncpy(buf, "PV_8_Compat", buflen); break; + case PV_9_Compat: + strncpy(buf, "PV_9_Compat", buflen); + break; default: strncpy(buf, "unknown", buflen); } @@ -2654,8 +2643,24 @@ 60 // 11 CriticalPriority }; +static int prio_init() { + if (ThreadPriorityPolicy == 1) { + if (geteuid() != 0) { + if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) { + warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \ + "e.g., being the root user. If the necessary permission is not " \ + "possessed, changes to priority will be silently ignored."); + } + } + } + if (UseCriticalJavaThreadPriority) { + os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; + } + return 0; +} + OSReturn os::set_native_priority(Thread* thread, int newpri) { - if (!UseThreadPriorities) return OS_OK; + if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK; pthread_t thr = thread->osthread()->pthread_id(); int policy = SCHED_OTHER; struct sched_param param; @@ -2670,7 +2675,7 @@ } OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) { - if (!UseThreadPriorities) { + if (!UseThreadPriorities || ThreadPriorityPolicy == 0) { *priority_ptr = java_to_os_priority[NormPriority]; return OS_OK; } @@ -2767,7 +2772,7 @@ os::SuspendResume::State state = osthread->sr.suspended(); if (state == os::SuspendResume::SR_SUSPENDED) { sigset_t suspend_set; // signals for sigsuspend() - + sigemptyset(&suspend_set); // get current set of blocked signals and unblock resume signal pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); sigdelset(&suspend_set, SR_signum); @@ -3053,6 +3058,7 @@ // try to honor the signal mask sigset_t oset; + sigemptyset(&oset); pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset); // call into the chained handler @@ -3063,7 +3069,7 @@ } // restore the signal mask - pthread_sigmask(SIG_SETMASK, &oset, 0); + pthread_sigmask(SIG_SETMASK, &oset, NULL); } // Tell jvm's signal handler the signal is taken care of. 
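// [Editorial aside, not part of the patch] The sigemptyset() calls added above
// matter because a stack-allocated sigset_t has indeterminate contents, and
// pthread_sigmask(SIG_BLOCK, NULL, &set) only fills 'set' in on success; the
// NULL in place of 0 for the unused out-argument is likewise the idiomatic
// pointer spelling. A minimal standalone sketch of the same pattern, with
// SIGUSR1 chosen purely for illustration:

#include <signal.h>
#include <pthread.h>

static void run_with_sigusr1_blocked(void (*critical_section)(void)) {
  sigset_t block_set, old_set;
  sigemptyset(&block_set);                       // start from a known-empty set
  sigemptyset(&old_set);                         // defensive init, as in the patch
  sigaddset(&block_set, SIGUSR1);
  pthread_sigmask(SIG_BLOCK, &block_set, &old_set);
  critical_section();
  pthread_sigmask(SIG_SETMASK, &old_set, NULL);  // NULL, not 0, for pointer args
}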
return true; @@ -3579,6 +3585,9 @@ } } + // initialize thread priority policy + prio_init(); + return JNI_OK; } @@ -4020,7 +4029,7 @@ void os::pause() { char filename[MAX_PATH]; if (PauseAtStartupFile && PauseAtStartupFile[0]) { - jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); + jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); } else { jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/os/bsd/os_bsd.cpp --- a/src/hotspot/os/bsd/os_bsd.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/os/bsd/os_bsd.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -3671,7 +3671,7 @@ void os::pause() { char filename[MAX_PATH]; if (PauseAtStartupFile && PauseAtStartupFile[0]) { - jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); + jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); } else { jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/os/posix/os_posix.cpp --- a/src/hotspot/os/posix/os_posix.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/os/posix/os_posix.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -373,8 +373,12 @@ void os::Posix::print_load_average(outputStream* st) { st->print("load average:"); double loadavg[3]; - os::loadavg(loadavg, 3); - st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); + int res = os::loadavg(loadavg, 3); + if (res != -1) { + st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); + } else { + st->print(" Unavailable"); + } st->cr(); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/os/solaris/os_solaris.cpp --- a/src/hotspot/os/solaris/os_solaris.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/os/solaris/os_solaris.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -4428,7 +4428,7 @@ void os::pause() { char filename[MAX_PATH]; if (PauseAtStartupFile && PauseAtStartupFile[0]) { - jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); + jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); } else { jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/os/windows/os_windows.cpp --- a/src/hotspot/os/windows/os_windows.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/os/windows/os_windows.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -4975,7 +4975,7 @@ void os::pause() { char filename[MAX_PATH]; if (PauseAtStartupFile && PauseAtStartupFile[0]) { - jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); + jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile); } else { jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/adlc/formssel.cpp --- a/src/hotspot/share/adlc/formssel.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/adlc/formssel.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -773,11 +773,6 @@ !strcmp(_matrule->_rChild->_opType,"CheckCastPP") || !strcmp(_matrule->_rChild->_opType,"GetAndSetP") || !strcmp(_matrule->_rChild->_opType,"GetAndSetN") || -#if INCLUDE_ZGC - !strcmp(_matrule->_rChild->_opType,"ZGetAndSetP") || - !strcmp(_matrule->_rChild->_opType,"ZCompareAndExchangeP") || - !strcmp(_matrule->_rChild->_opType,"LoadBarrierSlowReg") || -#endif #if INCLUDE_SHENANDOAHGC !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") || !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") || @@ -3510,9 +3505,6 @@ "StoreCM", "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP", "GetAndAddB", 
"GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN", -#if INCLUDE_ZGC - "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP", -#endif "ClearArray" }; int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/aot/aotCodeHeap.cpp --- a/src/hotspot/share/aot/aotCodeHeap.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/aot/aotCodeHeap.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -37,6 +37,7 @@ #include "memory/allocation.inline.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.inline.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/aot/aotCompiledMethod.cpp --- a/src/hotspot/share/aot/aotCompiledMethod.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "compiler/compilerOracle.hpp" #include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" +#include "oops/klass.inline.hpp" #include "oops/method.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/c1/c1_GraphBuilder.cpp --- a/src/hotspot/share/c1/c1_GraphBuilder.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -33,13 +33,13 @@ #include "ci/ciKlass.hpp" #include "ci/ciMemberName.hpp" #include "ci/ciUtilities.inline.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" #include "jfr/jfrEvents.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/sharedRuntime.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/vm_version.hpp" #include "utilities/bitMap.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/c1/c1_LIRGenerator.cpp --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -36,6 +36,7 @@ #include "ci/ciUtilities.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/c1/barrierSetC1.hpp" +#include "oops/klass.inline.hpp" #include "runtime/arguments.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/c1/c1_Runtime1.cpp --- a/src/hotspot/share/c1/c1_Runtime1.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/c1/c1_Runtime1.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -37,6 +37,7 @@ #include "code/pcDesc.hpp" #include "code/scopeDesc.hpp" #include "code/vtableStubs.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/c1/barrierSetC1.hpp" @@ -55,7 +56,6 @@ #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/c1/c1_ValueStack.cpp --- a/src/hotspot/share/c1/c1_ValueStack.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/c1/c1_ValueStack.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -42,31 +42,21 @@ verify(); } - ValueStack::ValueStack(ValueStack* copy_from, Kind kind, 
                       int bci)
  : _scope(copy_from->scope())
  , _caller_state(copy_from->caller_state())
  , _bci(bci)
  , _kind(kind)
-  , _locals()
-  , _stack()
+  , _locals(copy_from->locals_size_for_copy(kind))
+  , _stack(copy_from->stack_size_for_copy(kind))
  , _locks(copy_from->locks_size() == 0 ? NULL : new Values(copy_from->locks_size()))
{
  assert(kind != EmptyExceptionState || !Compilation::current()->env()->should_retain_local_variables(), "need locals");
  if (kind != EmptyExceptionState) {
-    // only allocate space if we need to copy the locals-array
-    _locals = Values(copy_from->locals_size());
    _locals.appendAll(&copy_from->_locals);
  }

  if (kind != ExceptionState && kind != EmptyExceptionState) {
-    if (kind == Parsing) {
-      // stack will be modified, so reserve enough space to avoid resizing
-      _stack = Values(scope()->method()->max_stack());
-    } else {
-      // stack will not be modified, so do not waste space
-      _stack = Values(copy_from->stack_size());
-    }
    _stack.appendAll(&copy_from->_stack);
  }
@@ -77,6 +67,25 @@
  verify();
}

+int ValueStack::locals_size_for_copy(Kind kind) const {
+  if (kind != EmptyExceptionState) {
+    return locals_size();
+  }
+  return 0;
+}
+
+int ValueStack::stack_size_for_copy(Kind kind) const {
+  if (kind != ExceptionState && kind != EmptyExceptionState) {
+    if (kind == Parsing) {
+      // stack will be modified, so reserve enough space to avoid resizing
+      return scope()->method()->max_stack();
+    } else {
+      // stack will not be modified, so do not waste space
+      return stack_size();
+    }
+  }
+  return 0;
+}

bool ValueStack::is_same(ValueStack* s) {
  if (scope() != s->scope()) return false;
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/c1/c1_ValueStack.hpp
--- a/src/hotspot/share/c1/c1_ValueStack.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/c1/c1_ValueStack.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -65,6 +65,8 @@
   // for simplified copying
   ValueStack(ValueStack* copy_from, Kind kind, int bci);

+  int locals_size_for_copy(Kind kind) const;
+  int stack_size_for_copy(Kind kind) const;
 public:
   // creation
   ValueStack(IRScope* scope, ValueStack* caller_state);
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/ci/ciEnv.cpp
--- a/src/hotspot/share/ci/ciEnv.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/ci/ciEnv.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -238,6 +238,7 @@
   _jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables();
   _jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions();
   _jvmti_can_pop_frame = JvmtiExport::can_pop_frame();
+  _jvmti_can_get_owned_monitor_info = JvmtiExport::can_get_owned_monitor_info();
 }

 bool ciEnv::jvmti_state_changed() const {
@@ -262,6 +263,10 @@
       JvmtiExport::can_pop_frame()) {
     return true;
   }
+  if (!_jvmti_can_get_owned_monitor_info &&
+      JvmtiExport::can_get_owned_monitor_info()) {
+    return true;
+  }

   return false;
 }
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/ci/ciEnv.hpp
--- a/src/hotspot/share/ci/ciEnv.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/ci/ciEnv.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -73,6 +73,7 @@
   bool _jvmti_can_access_local_variables;
   bool _jvmti_can_post_on_exceptions;
   bool _jvmti_can_pop_frame;
+  bool _jvmti_can_get_owned_monitor_info; // includes can_get_owned_monitor_stack_depth_info

   // Cache DTrace flags
   bool _dtrace_extended_probes;
@@ -347,6 +348,7 @@
   }
   bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
   bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; }
+  bool jvmti_can_get_owned_monitor_info()
const { return _jvmti_can_get_owned_monitor_info; } // Cache DTrace flags void cache_dtrace_flags(); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/ci/ciMetadata.hpp --- a/src/hotspot/share/ci/ciMetadata.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/ci/ciMetadata.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -51,7 +51,6 @@ virtual bool is_metadata() const { return true; } virtual bool is_type() const { return false; } - virtual bool is_cpcache() const { return false; } virtual bool is_return_address() const { return false; } virtual bool is_method() const { return false; } virtual bool is_method_data() const { return false; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/classLoader.cpp --- a/src/hotspot/share/classfile/classLoader.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/classfile/classLoader.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -57,7 +57,6 @@ #include "oops/symbol.hpp" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/classLoader.hpp --- a/src/hotspot/share/classfile/classLoader.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/classfile/classLoader.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -237,6 +237,8 @@ CDS_ONLY(static ClassPathEntry* app_classpath_entries() {return _app_classpath_entries;}) CDS_ONLY(static ClassPathEntry* module_path_entries() {return _module_path_entries;}) + static bool has_bootclasspath_append() { return _first_append_entry != NULL; } + protected: // Initialization: // - setup the boot loader's system class path diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/javaClasses.cpp --- a/src/hotspot/share/classfile/javaClasses.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/classfile/javaClasses.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -377,20 +377,24 @@ if (_to_java_string_fn == NULL) { void *lib_handle = os::native_java_library(); - _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "NewStringPlatform")); + _to_java_string_fn = CAST_TO_FN_PTR(to_java_string_fn_t, os::dll_lookup(lib_handle, "JNU_NewStringPlatform")); if (_to_java_string_fn == NULL) { fatal("NewStringPlatform missing"); } } jstring js = NULL; - { JavaThread* thread = (JavaThread*)THREAD; - assert(thread->is_Java_thread(), "must be java thread"); + { + assert(THREAD->is_Java_thread(), "must be java thread"); + JavaThread* thread = (JavaThread*)THREAD; HandleMark hm(thread); ThreadToNativeFromVM ttn(thread); js = (_to_java_string_fn)(thread->jni_environment(), str); } - return Handle(THREAD, JNIHandles::resolve(js)); + + Handle native_platform_string(THREAD, JNIHandles::resolve(js)); + JNIHandles::destroy_local(js); // destroy local JNIHandle. 
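// [Editorial aside, not part of the patch] The JNIHandles::destroy_local() added
// above keeps the temporary JNI local handle from accumulating in the thread's
// handle block: once the jstring is resolved into a VM-internal Handle, the
// local ref is dead weight. The same discipline in plain JNI looks like the
// standalone sketch below, where NewGlobalRef stands in for the VM-internal
// Handle that keeps the object reachable:

#include <jni.h>

static jobject retain_and_release_local(JNIEnv* env, jobject local) {
  if (local == NULL) return NULL;
  jobject strong = env->NewGlobalRef(local);  // keep the object reachable
  env->DeleteLocalRef(local);                 // drop the temporary local handle
  return strong;                              // caller must DeleteGlobalRef later
}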
+  return native_platform_string;
 }

 // Converts a Java String to a native C string that can be used for
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/systemDictionary.cpp
--- a/src/hotspot/share/classfile/systemDictionary.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -1205,10 +1205,8 @@
   TempNewSymbol pkg_name = NULL;
   PackageEntry* pkg_entry = NULL;
   ModuleEntry* mod_entry = NULL;
-  const char* pkg_string = NULL;
   pkg_name = InstanceKlass::package_from_name(class_name, CHECK_false);
   if (pkg_name != NULL) {
-    pkg_string = pkg_name->as_C_string();
     if (loader_data != NULL) {
       pkg_entry = loader_data->packages()->lookup_only(pkg_name);
     }
@@ -1245,7 +1243,7 @@
     // 3. or, the class is from an unnamed module
     if (!ent->is_modules_image() && ik->is_shared_boot_class()) {
       // the class is from the -Xbootclasspath/a
-      if (pkg_string == NULL ||
+      if (pkg_name == NULL ||
           pkg_entry == NULL ||
           pkg_entry->in_unnamed_module()) {
         assert(mod_entry == NULL ||
@@ -1257,8 +1255,7 @@
       return false;
     } else {
       bool res = SystemDictionaryShared::is_shared_class_visible_for_classloader(
-        ik, class_loader, pkg_string, pkg_name,
-        pkg_entry, mod_entry, CHECK_(false));
+        ik, class_loader, pkg_name, pkg_entry, mod_entry, CHECK_(false));
       return res;
     }
   }
@@ -1432,6 +1429,11 @@
       // a named package within the unnamed module. In all cases,
       // limit visibility to search for the class only in the boot
       // loader's append path.
+      if (!ClassLoader::has_bootclasspath_append()) {
+        // If there is no bootclasspath append entry, no need to continue
+        // searching.
+        return NULL;
+      }
       search_only_bootloader_append = true;
     }
   }
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/systemDictionaryShared.cpp
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -657,7 +657,6 @@
 bool SystemDictionaryShared::is_shared_class_visible_for_classloader(
                                                      InstanceKlass* ik,
                                                      Handle class_loader,
-                                                     const char* pkg_string,
                                                      Symbol* pkg_name,
                                                      PackageEntry* pkg_entry,
                                                      ModuleEntry* mod_entry,
@@ -684,7 +683,7 @@
     }
   } else if (SystemDictionary::is_system_class_loader(class_loader())) {
     assert(ent != NULL, "shared class for system loader should have valid SharedClassPathEntry");
-    if (pkg_string == NULL) {
+    if (pkg_name == NULL) {
       // The archived class is in the unnamed package. Currently, the boot image
       // does not contain any class in the unnamed package.
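// [Editorial aside, not part of the patch] Dropping pkg_string above appears to
// work because package names are interned Symbols: converting one to a C string
// via as_C_string() allocated from the ResourceArea just to answer a question
// that pointer identity already answers. A standalone sketch of the idea with a
// hypothetical single-threaded interner (not the HotSpot Symbol API):

#include <string>
#include <unordered_set>

const std::string* intern(const std::string& s) {
  static std::unordered_set<std::string> table;   // one canonical copy per name
  return &*table.insert(s).first;                 // node pointers stay stable
}

bool same_package(const std::string* a, const std::string* b) {
  return a == b;   // interned: pointer equality is name equality
}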
assert(!ent->is_modules_image(), "Class in the unnamed package must be from the classpath"); @@ -906,14 +905,9 @@ return NULL; } - const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, class_name); + const RunTimeSharedClassInfo* record = find_record(&_unregistered_dictionary, &_dynamic_unregistered_dictionary, class_name); if (record == NULL) { - if (DynamicArchive::is_mapped()) { - record = find_record(&_dynamic_unregistered_dictionary, class_name); - } - if (record == NULL) { - return NULL; - } + return NULL; } int clsfile_size = cfs->length(); @@ -1413,29 +1407,34 @@ } const RunTimeSharedClassInfo* -SystemDictionaryShared::find_record(RunTimeSharedDictionary* dict, Symbol* name) { - if (UseSharedSpaces) { - unsigned int hash = primitive_hash(name); - return dict->lookup(name, hash, 0); - } else { +SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTimeSharedDictionary* dynamic_dict, Symbol* name) { + if (!UseSharedSpaces || !name->is_shared()) { + // The names of all shared classes must also be a shared Symbol. return NULL; } + + unsigned int hash = primitive_hash(name); + const RunTimeSharedClassInfo* record = NULL; + if (!MetaspaceShared::is_shared_dynamic(name)) { + // The names of all shared classes in the static dict must also be in the + // static archive + record = static_dict->lookup(name, hash, 0); + } + + if (record == NULL && DynamicArchive::is_mapped()) { + record = dynamic_dict->lookup(name, hash, 0); + } + + return record; } InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) { - const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, name); - if (record) { + const RunTimeSharedClassInfo* record = find_record(&_builtin_dictionary, &_dynamic_builtin_dictionary, name); + if (record != NULL) { return record->_klass; + } else { + return NULL; } - - if (DynamicArchive::is_mapped()) { - record = find_record(&_dynamic_builtin_dictionary, name); - if (record) { - return record->_klass; - } - } - - return NULL; } void SystemDictionaryShared::update_shared_entry(InstanceKlass* k, int id) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/systemDictionaryShared.hpp --- a/src/hotspot/share/classfile/systemDictionaryShared.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -223,7 +223,9 @@ public: static InstanceKlass* find_builtin_class(Symbol* class_name); - static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* dict, Symbol* name); + static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* static_dict, + RunTimeSharedDictionary* dynamic_dict, + Symbol* name); static bool has_platform_or_app_classes(); @@ -240,7 +242,6 @@ static bool is_sharing_possible(ClassLoaderData* loader_data); static bool is_shared_class_visible_for_classloader(InstanceKlass* ik, Handle class_loader, - const char* pkg_string, Symbol* pkg_name, PackageEntry* pkg_entry, ModuleEntry* mod_entry, diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/verificationType.cpp --- a/src/hotspot/share/classfile/verificationType.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/classfile/verificationType.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -28,6 +28,7 @@ #include "classfile/verificationType.hpp" #include "classfile/verifier.hpp" #include "logging/log.hpp" +#include "oops/klass.inline.hpp" #include "runtime/handles.inline.hpp" VerificationType VerificationType::from_tag(u1 tag) { diff -r 
54c1ba464b78 -r 28c7e6711871 src/hotspot/share/classfile/verifier.cpp
--- a/src/hotspot/share/classfile/verifier.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/classfile/verifier.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -63,29 +63,39 @@
 #define STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION 52
 #define MAX_ARRAY_DIMENSIONS 255

-// Access to external entry for VerifyClassCodes - old byte code verifier
+// Access to external entry for VerifyClassForMajorVersion - old byte code verifier

 extern "C" {
-  typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint);
-  typedef jboolean (*verify_byte_codes_fn_new_t)(JNIEnv *, jclass, char *, jint, jint);
+  typedef jboolean (*verify_byte_codes_fn_t)(JNIEnv *, jclass, char *, jint, jint);
 }

-static void* volatile _verify_byte_codes_fn = NULL;
+static verify_byte_codes_fn_t volatile _verify_byte_codes_fn = NULL;
+
+static verify_byte_codes_fn_t verify_byte_codes_fn() {

-static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
+  if (_verify_byte_codes_fn != NULL)
+    return _verify_byte_codes_fn;
+
+  MutexLocker locker(Verify_lock);
+
+  if (_verify_byte_codes_fn != NULL)
+    return _verify_byte_codes_fn;

-static void* verify_byte_codes_fn() {
-  if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
-    void *lib_handle = os::native_java_library();
-    void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
-    OrderAccess::release_store(&_verify_byte_codes_fn, func);
-    if (func == NULL) {
-      _is_new_verify_byte_codes_fn = false;
-      func = os::dll_lookup(lib_handle, "VerifyClassCodes");
-      OrderAccess::release_store(&_verify_byte_codes_fn, func);
-    }
-  }
-  return (void*)_verify_byte_codes_fn;
+  // Load verify dll
+  char buffer[JVM_MAXPATHLEN];
+  char ebuf[1024];
+  if (!os::dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), "verify"))
+    return NULL; // Caller will throw VerifyError
+
+  void *lib_handle = os::dll_load(buffer, ebuf, sizeof(ebuf));
+  if (lib_handle == NULL)
+    return NULL; // Caller will throw VerifyError
+
+  void *fn = os::dll_lookup(lib_handle, "VerifyClassForMajorVersion");
+  if (fn == NULL)
+    return NULL; // Caller will throw VerifyError
+
+  return _verify_byte_codes_fn = CAST_TO_FN_PTR(verify_byte_codes_fn_t, fn);
 }

@@ -282,7 +292,7 @@
   JavaThread* thread = (JavaThread*)THREAD;
   JNIEnv *env = thread->jni_environment();

-  void* verify_func = verify_byte_codes_fn();
+  verify_byte_codes_fn_t verify_func = verify_byte_codes_fn();

   if (verify_func == NULL) {
     jio_snprintf(message, message_len, "Could not link verifier");
@@ -301,16 +311,7 @@
     // ThreadToNativeFromVM takes care of changing thread_state, so safepoint
     // code knows that we have left the VM

-    if (_is_new_verify_byte_codes_fn) {
-      verify_byte_codes_fn_new_t func =
-        CAST_TO_FN_PTR(verify_byte_codes_fn_new_t, verify_func);
-      result = (*func)(env, cls, message, (int)message_len,
-                       klass->major_version());
-    } else {
-      verify_byte_codes_fn_t func =
-        CAST_TO_FN_PTR(verify_byte_codes_fn_t, verify_func);
-      result = (*func)(env, cls, message, (int)message_len);
-    }
+    result = (*verify_func)(env, cls, message, (int)message_len, klass->major_version());
   }

   JNIHandles::destroy_local(cls);
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/code/codeCache.cpp
--- a/src/hotspot/share/code/codeCache.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/code/codeCache.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -33,6 +33,7 @@
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
 #include "code/pcDesc.hpp"
+#include "compiler/compilationPolicy.hpp"
 #include "compiler/compileBroker.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "logging/log.hpp"
@@ -46,7 +47,6 @@
 #include "oops/oop.inline.hpp"
 #include "oops/verifyOopClosure.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/icache.hpp"
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/code/compiledIC.cpp
--- a/src/hotspot/share/code/compiledIC.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/code/compiledIC.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -741,4 +741,22 @@
   tty->cr();
 }

+void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
+                                              NativeMovConstReg* method_holder,
+                                              NativeJump* jump) {
+  // A generated lambda form might be deleted from the LambdaForm
+  // cache in MethodTypeForm. If a JIT-compiled LambdaForm method
+  // becomes not entrant and the cache access returns null, the next
+  // resolve will lead to a new generated LambdaForm.
+  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
+  assert(old_method == NULL || old_method == callee() ||
+         callee->is_compiled_lambda_form() ||
+         !old_method->method_holder()->is_loader_alive() ||
+         old_method->is_old(), // may be race patching deoptimized nmethod due to redefinition.
+         "a) MT-unsafe modification of inline cache");
+
+  address destination = jump->jump_destination();
+  assert(destination == (address)-1 || destination == entry,
+         "b) MT-unsafe modification of inline cache");
+}
 #endif // !PRODUCT
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/code/compiledIC.hpp
--- a/src/hotspot/share/code/compiledIC.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/code/compiledIC.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -402,6 +402,9 @@
   // Also used by CompiledIC
   void set_to_interpreted(const methodHandle& callee, address entry);
+  void verify_mt_safe(const methodHandle& callee, address entry,
+                      NativeMovConstReg* method_holder,
+                      NativeJump* jump) PRODUCT_RETURN;
 #if INCLUDE_AOT
   void set_to_far(const methodHandle& callee, address entry);
 #endif
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/compilationPolicy.cpp
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#include "precompiled.hpp" +#include "classfile/classLoaderDataGraph.inline.hpp" +#include "code/compiledIC.hpp" +#include "code/nmethod.hpp" +#include "code/scopeDesc.hpp" +#include "compiler/compilationPolicy.hpp" +#include "compiler/tieredThresholdPolicy.hpp" +#include "interpreter/interpreter.hpp" +#include "memory/resourceArea.hpp" +#include "oops/methodData.hpp" +#include "oops/method.inline.hpp" +#include "oops/oop.inline.hpp" +#include "prims/nativeLookup.hpp" +#include "runtime/frame.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/stubRoutines.hpp" +#include "runtime/thread.hpp" +#include "runtime/vframe.hpp" +#include "runtime/vmOperations.hpp" +#include "utilities/events.hpp" +#include "utilities/globalDefinitions.hpp" + +#ifdef COMPILER1 +#include "c1/c1_Compiler.hpp" +#endif +#ifdef COMPILER2 +#include "opto/c2compiler.hpp" +#endif + +CompilationPolicy* CompilationPolicy::_policy; + +// Determine compilation policy based on command line argument +void compilationPolicy_init() { + #ifdef TIERED + if (TieredCompilation) { + CompilationPolicy::set_policy(new TieredThresholdPolicy()); + } else { + CompilationPolicy::set_policy(new SimpleCompPolicy()); + } + #else + CompilationPolicy::set_policy(new SimpleCompPolicy()); + #endif + + CompilationPolicy::policy()->initialize(); +} + +// Returns true if m must be compiled before executing it +// This is intended to force compiles for methods (usually for +// debugging) that would otherwise be interpreted for some reason. +bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) { + // Don't allow Xcomp to cause compiles in replay mode + if (ReplayCompiles) return false; + + if (m->has_compiled_code()) return false; // already compiled + if (!can_be_compiled(m, comp_level)) return false; + + return !UseInterpreter || // must compile all methods + (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods +} + +void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) { + if (must_be_compiled(selected_method)) { + // This path is unusual, mostly used by the '-Xcomp' stress test mode. + + // Note: with several active threads, the must_be_compiled may be true + // while can_be_compiled is false; remove assert + // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile"); + if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) { + // don't force compilation, resolve was on behalf of compiler + return; + } + if (selected_method->method_holder()->is_not_initialized()) { + // 'is_not_initialized' means not only '!is_initialized', but also that + // initialization has not been started yet ('!being_initialized') + // Do not force compilation of methods in uninitialized classes. + // Note that doing this would throw an assert later, + // in CompileBroker::compile_method. + // We sometimes use the link resolver to do reflective lookups + // even before classes are initialized. 
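// [Editorial aside, not part of the patch] Together, must_be_compiled() above
// and compile_if_required() implement the -Xcomp contract: compile a method
// before its first execution unless compilation is being replayed, already
// done, impossible, requested on behalf of a compiler thread, or (as the early
// return just below) the holder class has not even started initialization.
// The core predicate, distilled into a standalone sketch with hypothetical
// names that mirror the flags used above:

struct Env {
  bool replay_compiles;              // -XX:+ReplayCompiles
  bool use_interpreter;              // false under -Xcomp
  bool use_compiler;
  bool always_compile_loop_methods;
};

static bool must_compile(const Env& e, bool has_compiled_code, bool compilable,
                         bool has_loops, bool queue_accepts_jobs) {
  if (e.replay_compiles || has_compiled_code || !compilable) return false;
  return !e.use_interpreter ||       // -Xcomp: every method, up front
         (e.use_compiler && e.always_compile_loop_methods &&
          has_loops && queue_accepts_jobs);
}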
+ return; + } + CompileBroker::compile_method(selected_method, InvocationEntryBci, + CompilationPolicy::policy()->initial_compile_level(), + methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK); + } +} + +// Returns true if m is allowed to be compiled +bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) { + // allow any levels for WhiteBox + assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level"); + + if (m->is_abstract()) return false; + if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false; + + // Math intrinsics should never be compiled as this can lead to + // monotonicity problems because the interpreter will prefer the + // compiled code to the intrinsic version. This can't happen in + // production because the invocation counter can't be incremented + // but we shouldn't expose the system to this problem in testing + // modes. + if (!AbstractInterpreter::can_be_compiled(m)) { + return false; + } + if (comp_level == CompLevel_all) { + if (TieredCompilation) { + // enough to be compilable at any level for tiered + return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization); + } else { + // must be compilable at available level for non-tiered + return !m->is_not_compilable(CompLevel_highest_tier); + } + } else if (is_compile(comp_level)) { + return !m->is_not_compilable(comp_level); + } + return false; +} + +// Returns true if m is allowed to be osr compiled +bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) { + bool result = false; + if (comp_level == CompLevel_all) { + if (TieredCompilation) { + // enough to be osr compilable at any level for tiered + result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization); + } else { + // must be osr compilable at available level for non-tiered + result = !m->is_not_osr_compilable(CompLevel_highest_tier); + } + } else if (is_compile(comp_level)) { + result = !m->is_not_osr_compilable(comp_level); + } + return (result && can_be_compiled(m, comp_level)); +} + +bool CompilationPolicy::is_compilation_enabled() { + // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler + return CompileBroker::should_compile_new_jobs(); +} + +CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) { + // Remove unloaded methods from the queue + for (CompileTask* task = compile_queue->first(); task != NULL; ) { + CompileTask* next = task->next(); + if (task->is_unloaded()) { + compile_queue->remove_and_mark_stale(task); + } + task = next; + } +#if INCLUDE_JVMCI + if (UseJVMCICompiler && !BackgroundCompilation) { + /* + * In blocking compilation mode, the CompileBroker will make + * compilations submitted by a JVMCI compiler thread non-blocking. These + * compilations should be scheduled after all blocking compilations + * to service non-compiler related compilations sooner and reduce the + * chance of such compilations timing out. 
+ */ + for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) { + if (task->is_blocking()) { + return task; + } + } + } +#endif + return compile_queue->first(); +} + +#ifndef PRODUCT +void SimpleCompPolicy::trace_osr_completion(nmethod* osr_nm) { + if (TraceOnStackReplacement) { + if (osr_nm == NULL) tty->print_cr("compilation failed"); + else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm)); + } +} +#endif // !PRODUCT + +void SimpleCompPolicy::initialize() { + // Setup the compiler thread numbers + if (CICompilerCountPerCPU) { + // Example: if CICompilerCountPerCPU is true, then we get + // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine. + // May help big-app startup time. + _compiler_count = MAX2(log2_int(os::active_processor_count())-1,1); + // Make sure there is enough space in the code cache to hold all the compiler buffers + size_t buffer_size = 1; +#ifdef COMPILER1 + buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size; +#endif +#ifdef COMPILER2 + buffer_size = is_server_compilation_mode_vm() ? C2Compiler::initial_code_buffer_size() : buffer_size; +#endif + int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size; + if (_compiler_count > max_count) { + // Lower the compiler count such that all buffers fit into the code cache + _compiler_count = MAX2(max_count, 1); + } + FLAG_SET_ERGO(CICompilerCount, _compiler_count); + } else { + _compiler_count = CICompilerCount; + } +} + +// Note: this policy is used ONLY if TieredCompilation is off. +// compiler_count() behaves the following way: +// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return +// zero for the c1 compilation levels in server compilation mode runs +// and c2 compilation levels in client compilation mode runs. +// - with COMPILER2 not defined it should return zero for c2 compilation levels. +// - with COMPILER1 not defined it should return zero for c1 compilation levels. +// - if neither is defined - always return zero. +int SimpleCompPolicy::compiler_count(CompLevel comp_level) { + assert(!TieredCompilation, "This policy should not be used with TieredCompilation"); + if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||) + is_client_compilation_mode_vm() && is_c1_compile(comp_level)) { + return _compiler_count; + } + return 0; +} + +void SimpleCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) { + // Make sure invocation and backedge counter doesn't overflow again right away + // as would be the case for native methods. + + // BUT also make sure the method doesn't look like it was never executed. + // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + mcs->invocation_counter()->set_carry(); + mcs->backedge_counter()->set_carry(); + + assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed"); +} + +void SimpleCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) { + // Delay next back-branch event but pump up invocation counter to trigger + // whole method compilation. 
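// [Editorial aside, not part of the patch] Concretely, with the non-tiered
// default of -XX:CompileThreshold=10000, the code that follows sets the
// invocation counter to 10000, so the next invocation pushes it over the
// threshold and requests a full-method compile, and the backedge counter to
// 5000, far enough along that the method still looks warm while the next OSR
// check is pushed out. Distilled into a standalone sketch, using plain ints
// in place of InvocationCounter:

static void pump_for_full_compile(int& invocation_ctr, int& backedge_ctr,
                                  int compile_threshold /* e.g. 10000 */) {
  invocation_ctr = compile_threshold;      // next call triggers compilation
  backedge_ctr   = compile_threshold / 2;  // next OSR request is delayed
}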
+ MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* i = mcs->invocation_counter(); + InvocationCounter* b = mcs->backedge_counter(); + + // Don't set invocation_counter's value too low otherwise the method will + // look like immature (ic < ~5300) which prevents the inlining based on + // the type profiling. + i->set(i->state(), CompileThreshold); + // Don't reset counter too low - it is used to check if OSR method is ready. + b->set(b->state(), CompileThreshold / 2); +} + +// +// CounterDecay +// +// Iterates through invocation counters and decrements them. This +// is done at each safepoint. +// +class CounterDecay : public AllStatic { + static jlong _last_timestamp; + static void do_method(Method* m) { + MethodCounters* mcs = m->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + } + } +public: + static void decay(); + static bool is_decay_needed() { + return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; + } +}; + +jlong CounterDecay::_last_timestamp = 0; + +void CounterDecay::decay() { + _last_timestamp = os::javaTimeMillis(); + + // This operation is going to be performed only at the end of a safepoint + // and hence GC's will not be going on, all Java mutators are suspended + // at this point and hence SystemDictionary_lock is also not needed. + assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint"); + size_t nclasses = ClassLoaderDataGraph::num_instance_classes(); + size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 / + CounterHalfLifeTime); + for (size_t i = 0; i < classes_per_tick; i++) { + InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class(); + if (k != NULL) { + k->methods_do(do_method); + } + } +} + +// Called at the end of the safepoint +void SimpleCompPolicy::do_safepoint_work() { + if(UseCounterDecay && CounterDecay::is_decay_needed()) { + CounterDecay::decay(); + } +} + +void SimpleCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { + ScopeDesc* sd = trap_scope; + MethodCounters* mcs; + InvocationCounter* c; + for (; !sd->is_top(); sd = sd->sender()) { + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + // Reset ICs of inlined methods, since they can trigger compilations also. + mcs->invocation_counter()->reset(); + } + } + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + c = mcs->invocation_counter(); + if (is_osr) { + // It was an OSR method, so bump the count higher. + c->set(c->state(), CompileThreshold); + } else { + c->reset(); + } + mcs->backedge_counter()->reset(); + } +} + +// This method can be called by any component of the runtime to notify the policy +// that it's recommended to delay the compilation of this method. 
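+// It does so by decaying both counters (InvocationCounter::decay() halves the
+// count), so a delayed method must re-earn about half of its budget before it
+// overflows again. The same halving is what CounterDecay above amortizes across
+// safepoints: a tick visits nclasses * (CounterDecayMinIntervalLength * 1e-3 /
+// CounterHalfLifeTime) classes, e.g. 30000 loaded classes with the default
+// 500 ms interval and 30 s half-life => 30000 * 0.5 / 30 = 500 classes decayed
+// per tick (a sketch of the arithmetic, assuming the default flag values).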
+void SimpleCompPolicy::delay_compilation(Method* method) { + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + mcs->backedge_counter()->decay(); + } +} + +void SimpleCompPolicy::disable_compilation(Method* method) { + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + } +} + +CompileTask* SimpleCompPolicy::select_task(CompileQueue* compile_queue) { + return select_task_helper(compile_queue); +} + +bool SimpleCompPolicy::is_mature(Method* method) { + MethodData* mdo = method->method_data(); + assert(mdo != NULL, "Should be"); + uint current = mdo->mileage_of(method); + uint initial = mdo->creation_mileage(); + if (current < initial) + return true; // some sort of overflow + uint target; + if (ProfileMaturityPercentage <= 0) + target = (uint) -ProfileMaturityPercentage; // absolute value + else + target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 ); + return (current >= initial + target); +} + +nmethod* SimpleCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, + int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) { + assert(comp_level == CompLevel_none, "This should be only called from the interpreter"); + NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci)); + if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) { + // If certain JVMTI events (e.g. frame pop event) are requested then the + // thread is forced to remain in interpreted code. This is + // implemented partly by a check in the run_compiled_code + // section of the interpreter whether we should skip running + // compiled code, and partly by skipping OSR compiles for + // interpreted-only threads. + if (bci != InvocationEntryBci) { + reset_counter_for_back_branch_event(method); + return NULL; + } + } + if (ReplayCompiles) { + // Don't trigger other compiles in testing mode + if (bci == InvocationEntryBci) { + reset_counter_for_invocation_event(method); + } else { + reset_counter_for_back_branch_event(method); + } + return NULL; + } + + if (bci == InvocationEntryBci) { + // when code cache is full, compilation gets switched off, UseCompiler + // is set to false + if (!method->has_compiled_code() && UseCompiler) { + method_invocation_event(method, thread); + } else { + // Force counter overflow on method entry, even if no compilation + // happened. (The method_invocation_event call does this also.) + reset_counter_for_invocation_event(method); + } + // compilation at an invocation overflow no longer goes and retries test for + // compiled method. We always run the loser of the race as interpreted. + // so return NULL + return NULL; + } else { + // counter overflow in a loop => try to do on-stack-replacement + nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true); + NOT_PRODUCT(trace_osr_request(method, osr_nm, bci)); + // when code cache is full, we should not compile any more... 
+ if (osr_nm == NULL && UseCompiler) { + method_back_branch_event(method, bci, thread); + osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true); + } + if (osr_nm == NULL) { + reset_counter_for_back_branch_event(method); + return NULL; + } + return osr_nm; + } + return NULL; +} + +#ifndef PRODUCT +void SimpleCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) { + if (TraceInvocationCounterOverflow) { + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); + ResourceMark rm; + if (bci == InvocationEntryBci) { + tty->print("comp-policy cntr ovfl @ %d in entry of ", bci); + } else { + tty->print("comp-policy cntr ovfl @ %d in loop of ", bci); + } + m->print_value(); + tty->cr(); + ic->print(); + bc->print(); + if (ProfileInterpreter) { + if (bci != InvocationEntryBci) { + MethodData* mdo = m->method_data(); + if (mdo != NULL) { + ProfileData *pd = mdo->bci_to_data(branch_bci); + if (pd == NULL) { + tty->print_cr("back branch count = N/A (missing ProfileData)"); + } else { + tty->print_cr("back branch count = %d", pd->as_JumpData()->taken()); + } + } + } + } + } +} + +void SimpleCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) { + if (TraceOnStackReplacement) { + ResourceMark rm; + tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for "); + method->print_short_name(tty); + tty->print_cr(" at bci %d", bci); + } +} +#endif // !PRODUCT + +void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) { + const int comp_level = CompLevel_highest_tier; + const int hot_count = m->invocation_count(); + reset_counter_for_invocation_event(m); + + if (is_compilation_enabled() && can_be_compiled(m, comp_level)) { + CompiledMethod* nm = m->code(); + if (nm == NULL ) { + CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread); + } + } +} + +void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) { + const int comp_level = CompLevel_highest_tier; + const int hot_count = m->backedge_count(); + + if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) { + CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread); + NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) + } +} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/compilationPolicy.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/compiler/compilationPolicy.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_COMPILER_COMPILATIONPOLICY_HPP
+#define SHARE_COMPILER_COMPILATIONPOLICY_HPP
+
+#include "code/nmethod.hpp"
+#include "compiler/compileBroker.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/vmOperations.hpp"
+#include "utilities/growableArray.hpp"
+
+// The CompilationPolicy selects which method (if any) should be compiled.
+// It also decides which methods must always be compiled (i.e., are never
+// interpreted).
+class CompileTask;
+class CompileQueue;
+
+class CompilationPolicy : public CHeapObj<mtCompiler> {
+  static CompilationPolicy* _policy;
+
+  // m must be compiled before executing it
+  static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);
+
+public:
+  // If m must_be_compiled then request a compilation from the CompileBroker.
+  // This supports the -Xcomp option.
+  static void compile_if_required(const methodHandle& m, TRAPS);
+
+  // m is allowed to be compiled
+  static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);
+  // m is allowed to be osr compiled
+  static bool can_be_osr_compiled(const methodHandle& m, int comp_level = CompLevel_all);
+  static bool is_compilation_enabled();
+  static void set_policy(CompilationPolicy* policy) { _policy = policy; }
+  static CompilationPolicy* policy() { return _policy; }
+
+  static CompileTask* select_task_helper(CompileQueue* compile_queue);
+
+  // Return the initial compile level used with -Xcomp
+  virtual CompLevel initial_compile_level() = 0;
+  virtual int compiler_count(CompLevel comp_level) = 0;
+  // Main notification entry; returns a pointer to an nmethod if an OSR compilation
+  // is required, NULL otherwise.
+  virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) = 0;
+  // safepoint() is called at the end of the safepoint
+  virtual void do_safepoint_work() = 0;
+  // reprofile request
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
+  // delay_compilation(method) can be called by any component of the runtime to notify the policy
+  // that it's recommended to delay the compilation of this method.
+  virtual void delay_compilation(Method* method) = 0;
+  // disable_compilation() is called whenever the runtime decides to disable compilation of the
+  // specified method.
+  virtual void disable_compilation(Method* method) = 0;
+  // Select task is called by CompileBroker. The queue is guaranteed to have at least one
+  // element and is locked. The function should select one and return it.
+  virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
+  // Tell the runtime if we think a given method is adequately profiled.
+  virtual bool is_mature(Method* method) = 0;
+  // Do policy initialization
+  virtual void initialize() = 0;
+  virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
+};
+
+// A simple compilation policy.
+class SimpleCompPolicy : public CompilationPolicy { + int _compiler_count; + private: + static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci); + static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci); + static void trace_osr_completion(nmethod* osr_nm); + void reset_counter_for_invocation_event(const methodHandle& method); + void reset_counter_for_back_branch_event(const methodHandle& method); + void method_invocation_event(const methodHandle& m, JavaThread* thread); + void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread); + public: + SimpleCompPolicy() : _compiler_count(0) { } + virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; } + virtual int compiler_count(CompLevel comp_level); + virtual void do_safepoint_work(); + virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); + virtual void delay_compilation(Method* method); + virtual void disable_compilation(Method* method); + virtual bool is_mature(Method* method); + virtual void initialize(); + virtual CompileTask* select_task(CompileQueue* compile_queue); + virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread); +}; + + +#endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/compileBroker.cpp --- a/src/hotspot/share/compiler/compileBroker.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/compiler/compileBroker.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -30,6 +30,7 @@ #include "code/codeCache.hpp" #include "code/codeHeapState.hpp" #include "code/dependencyContext.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "compiler/compilerOracle.hpp" @@ -48,7 +49,6 @@ #include "prims/whitebox.hpp" #include "runtime/arguments.hpp" #include "runtime/atomic.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/compilerDirectives.hpp --- a/src/hotspot/share/compiler/compilerDirectives.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/compiler/compilerDirectives.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -66,8 +66,7 @@ cflags(VectorizeDebug, uintx, 0, VectorizeDebug) \ cflags(CloneMapDebug, bool, false, CloneMapDebug) \ cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \ - cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \ -ZGC_ONLY(cflags(ZTraceLoadBarriers, bool, false, ZTraceLoadBarriers)) + cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) #else #define compilerdirectives_c2_flags(cflags) #endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/oopMap.cpp --- a/src/hotspot/share/compiler/oopMap.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/compiler/oopMap.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -48,29 +48,25 @@ // OopMapStream -OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) { +OopMapStream::OopMapStream(OopMap* oop_map) { _stream = new CompressedReadStream(oop_map->write_stream()->buffer()); - _mask = oop_types_mask; _size = oop_map->omv_count(); _position = 0; _valid_omv = false; } -OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) { +OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) { _stream = new 
CompressedReadStream(oop_map->data_addr()); - _mask = oop_types_mask; _size = oop_map->count(); _position = 0; _valid_omv = false; } void OopMapStream::find_next() { - while(_position++ < _size) { + if (_position++ < _size) { _omv.read_from(_stream); - if(((int)_omv.type() & _mask) > 0) { - _valid_omv = true; - return; - } + _valid_omv = true; + return; } _valid_omv = false; } @@ -140,16 +136,7 @@ assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" ); debug_only( _locs_used[reg->value()] = x; ) - OopMapValue o(reg, x); - - if(x == OopMapValue::callee_saved_value) { - // This can never be a stack location, so we don't need to transform it. - assert(optional->is_reg(), "Trying to callee save a stack location"); - o.set_content_reg(optional); - } else if(x == OopMapValue::derived_oop_value) { - o.set_content_reg(optional); - } - + OopMapValue o(reg, x, optional); o.write_on(write_stream()); increment_count(); } @@ -160,11 +147,6 @@ } -void OopMap::set_value(VMReg reg) { - // At this time, we don't need value entries in our OopMap. -} - - void OopMap::set_narrowoop(VMReg reg) { set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad()); } @@ -328,7 +310,7 @@ // changed before derived pointer offset has been collected) OopMapValue omv; { - OopMapStream oms(map,OopMapValue::derived_oop_value); + OopMapStream oms(map); if (!oms.is_done()) { #ifndef TIERED COMPILER1_PRESENT(ShouldNotReachHere();) @@ -340,27 +322,28 @@ #endif // !TIERED do { omv = oms.current(); - oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map); - guarantee(loc != NULL, "missing saved register"); - oop *derived_loc = loc; - oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map); - // Ignore NULL oops and decoded NULL narrow oops which - // equal to CompressedOops::base() when a narrow oop - // implicit null check is used in compiled code. - // The narrow_oop_base could be NULL or be the address - // of the page below heap depending on compressed oops mode. - if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) { - derived_oop_fn(base_loc, derived_loc); + if (omv.type() == OopMapValue::derived_oop_value) { + oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map); + guarantee(loc != NULL, "missing saved register"); + oop *derived_loc = loc; + oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map); + // Ignore NULL oops and decoded NULL narrow oops which + // equal to CompressedOops::base() when a narrow oop + // implicit null check is used in compiled code. + // The narrow_oop_base could be NULL or be the address + // of the page below heap depending on compressed oops mode. 
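+        // A derived pointer is an interior address computed from a base oop,
+        // e.g. (a sketch, not code from this file):
+        //   oop      base    = ...;                          // tracked by the GC
+        //   intptr_t derived = (intptr_t)base + 16;          // interior address
+        // If the GC moves base, derived must be rebased by the same delta,
+        // which is what derived_oop_fn records and applies here.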
+ if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) { + derived_oop_fn(base_loc, derived_loc); + } } oms.next(); } while (!oms.is_done()); } } - // We want coop and oop oop_types - int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value; { - for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) { + // We want coop and oop oop_types + for (OopMapStream oms(map); !oms.is_done(); oms.next()) { omv = oms.current(); oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map); // It should be an error if no location can be found for a @@ -436,12 +419,14 @@ assert(map != NULL, "no ptr map found"); DEBUG_ONLY(int nof_callee = 0;) - for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) { + for (OopMapStream oms(map); !oms.is_done(); oms.next()) { OopMapValue omv = oms.current(); - VMReg reg = omv.content_reg(); - oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map); - reg_map->set_location(reg, (address) loc); - DEBUG_ONLY(nof_callee++;) + if (omv.type() == OopMapValue::callee_saved_value) { + VMReg reg = omv.content_reg(); + oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map); + reg_map->set_location(reg, (address) loc); + DEBUG_ONLY(nof_callee++;) + } } // Check that runtime stubs save all callee-saved registers @@ -452,25 +437,6 @@ #endif // COMPILER2 } -//============================================================================= -// Non-Product code - -#ifndef PRODUCT - -bool ImmutableOopMap::has_derived_pointer() const { -#if !defined(TIERED) && !INCLUDE_JVMCI - COMPILER1_PRESENT(return false); -#endif // !TIERED -#if COMPILER2_OR_JVMCI - OopMapStream oms(this,OopMapValue::derived_oop_value); - return oms.is_done(); -#else - return false; -#endif // COMPILER2_OR_JVMCI -} - -#endif //PRODUCT - // Printing code is present in product build for -XX:+PrintAssembly. static diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/oopMap.hpp --- a/src/hotspot/share/compiler/oopMap.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/compiler/oopMap.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -53,7 +53,7 @@ public: // Constants - enum { type_bits = 4, + enum { type_bits = 2, register_bits = BitsPerShort - type_bits }; enum { type_shift = 0, @@ -64,19 +64,41 @@ register_mask = right_n_bits(register_bits), register_mask_in_place = register_mask << register_shift }; - enum oop_types { // must fit in type_bits - unused_value =0, // powers of 2, for masking OopMapStream - oop_value = 1, - narrowoop_value = 2, - callee_saved_value = 4, - derived_oop_value= 8 }; + enum oop_types { + oop_value, + narrowoop_value, + callee_saved_value, + derived_oop_value, + unused_value = -1 // Only used as a sentinel value + }; // Constructors OopMapValue () { set_value(0); set_content_reg(VMRegImpl::Bad()); } - OopMapValue (VMReg reg, oop_types t) { set_reg_type(reg, t); set_content_reg(VMRegImpl::Bad()); } - OopMapValue (VMReg reg, oop_types t, VMReg reg2) { set_reg_type(reg, t); set_content_reg(reg2); } - OopMapValue (CompressedReadStream* stream) { read_from(stream); } + OopMapValue (VMReg reg, oop_types t, VMReg reg2) { + set_reg_type(reg, t); + set_content_reg(reg2); + } + + private: + void set_reg_type(VMReg p, oop_types t) { + set_value((p->value() << register_shift) | t); + assert(reg() == p, "sanity check" ); + assert(type() == t, "sanity check" ); + } + void set_content_reg(VMReg r) { + if (is_callee_saved()) { + // This can never be a stack location, so we don't need to transform it. 
+ assert(r->is_reg(), "Trying to callee save a stack location"); + } else if (is_derived_oop()) { + assert (r->is_valid(), "must have a valid VMReg"); + } else { + assert (!r->is_valid(), "valid VMReg not allowed"); + } + _content_reg = r->value(); + } + + public: // Archiving void write_on(CompressedWriteStream* stream) { stream->write_int(value()); @@ -94,15 +116,10 @@ // Querying bool is_oop() { return mask_bits(value(), type_mask_in_place) == oop_value; } - bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; } + bool is_narrowoop() { return mask_bits(value(), type_mask_in_place) == narrowoop_value; } bool is_callee_saved() { return mask_bits(value(), type_mask_in_place) == callee_saved_value; } bool is_derived_oop() { return mask_bits(value(), type_mask_in_place) == derived_oop_value; } - void set_oop() { set_value((value() & register_mask_in_place) | oop_value); } - void set_narrowoop() { set_value((value() & register_mask_in_place) | narrowoop_value); } - void set_callee_saved() { set_value((value() & register_mask_in_place) | callee_saved_value); } - void set_derived_oop() { set_value((value() & register_mask_in_place) | derived_oop_value); } - VMReg reg() const { return VMRegImpl::as_VMReg(mask_bits(value(), register_mask_in_place) >> register_shift); } oop_types type() const { return (oop_types)mask_bits(value(), type_mask_in_place); } @@ -110,15 +127,7 @@ return (p->value() == (p->value() & register_mask)); } - void set_reg_type(VMReg p, oop_types t) { - set_value((p->value() << register_shift) | t); - assert(reg() == p, "sanity check" ); - assert(type() == t, "sanity check" ); - } - - VMReg content_reg() const { return VMRegImpl::as_VMReg(_content_reg, true); } - void set_content_reg(VMReg r) { _content_reg = r->value(); } // Physical location queries bool is_register_loc() { return reg()->is_reg(); } @@ -156,6 +165,8 @@ enum DeepCopyToken { _deep_copy_token }; OopMap(DeepCopyToken, OopMap* source); // used only by deep_copy + void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional); + public: OopMap(int frame_size, int arg_count); @@ -173,19 +184,14 @@ // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd // slots to hold 4-byte values like ints and floats in the LP64 build. 
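// With type_bits == 2 the four real oop_types fit exactly, and a value packs
// as in set_reg_type above (a sketch of the encoding):
//   value = (reg->value() << 2) | type;   // register number in the high bits
//   type  = value & right_n_bits(2);      // == type_mask_in_place
//   reg   = value >> 2;
// e.g. register 5 holding a derived_oop_value (3) encodes as (5 << 2) | 3 == 0x17.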
void set_oop ( VMReg local); - void set_value( VMReg local); void set_narrowoop(VMReg local); - void set_dead ( VMReg local); void set_callee_saved( VMReg local, VMReg caller_machine_register ); void set_derived_oop ( VMReg local, VMReg derived_from_local_register ); - void set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional); int heap_size() const; void copy_data_to(address addr) const; OopMap* deep_copy(); - bool has_derived_pointer() const PRODUCT_RETURN0; - bool legal_vm_reg_name(VMReg local) { return OopMapValue::legal_vm_reg_name(local); } @@ -269,7 +275,6 @@ public: ImmutableOopMap(const OopMap* oopmap); - bool has_derived_pointer() const PRODUCT_RETURN0; int count() const { return _count; } #ifdef ASSERT int nr_of_bytes() const; // this is an expensive operation, only used in debug builds @@ -334,7 +339,6 @@ class OopMapStream : public StackObj { private: CompressedReadStream* _stream; - int _mask; int _size; int _position; bool _valid_omv; @@ -342,8 +346,8 @@ void find_next(); public: - OopMapStream(OopMap* oop_map, int oop_types_mask = OopMapValue::type_mask_in_place); - OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask = OopMapValue::type_mask_in_place); + OopMapStream(OopMap* oop_map); + OopMapStream(const ImmutableOopMap* oop_map); bool is_done() { if(!_valid_omv) { find_next(); } return !_valid_omv; } void next() { find_next(); } OopMapValue current() { return _omv; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/tieredThresholdPolicy.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/compiler/tieredThresholdPolicy.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,1005 @@ +/* + * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#include "precompiled.hpp"
+#include "compiler/compileBroker.hpp"
+#include "compiler/compilerOracle.hpp"
+#include "compiler/tieredThresholdPolicy.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "code/scopeDesc.hpp"
+#include "oops/method.inline.hpp"
+#if INCLUDE_JVMCI
+#include "jvmci/jvmci.hpp"
+#endif
+
+#ifdef TIERED
+
+#include "c1/c1_Compiler.hpp"
+#include "opto/c2compiler.hpp"
+
+template<CompLevel level>
+bool TieredThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
+  double threshold_scaling;
+  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+    scale *= threshold_scaling;
+  }
+  switch(level) {
+  case CompLevel_aot:
+    return (i >= Tier3AOTInvocationThreshold * scale) ||
+           (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
+  case CompLevel_none:
+  case CompLevel_limited_profile:
+    return (i >= Tier3InvocationThreshold * scale) ||
+           (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
+  case CompLevel_full_profile:
+    return (i >= Tier4InvocationThreshold * scale) ||
+           (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
+  }
+  return true;
+}
+
+template<CompLevel level>
+bool TieredThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
+  double threshold_scaling;
+  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+    scale *= threshold_scaling;
+  }
+  switch(level) {
+  case CompLevel_aot:
+    return b >= Tier3AOTBackEdgeThreshold * scale;
+  case CompLevel_none:
+  case CompLevel_limited_profile:
+    return b >= Tier3BackEdgeThreshold * scale;
+  case CompLevel_full_profile:
+    return b >= Tier4BackEdgeThreshold * scale;
+  }
+  return true;
+}
+
+// Simple methods compile as well with C1 as with C2.
+// Determine if a given method is such a case.
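+// Worked example of the call predicate (a sketch, assuming the default
+// product flag values Tier3InvocationThreshold=200,
+// Tier3MinInvocationThreshold=100, Tier3CompileThreshold=2000 and scale == 1):
+//   i=150, b=1900:  150 >= 200 is false, but
+//                   150 >= 100 && 150 + 1900 >= 2000 is true,
+//   so call_predicate_helper<CompLevel_none> returns true and the method
+//   becomes eligible for a level 3 (C1, full profile) compilation.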
+bool TieredThresholdPolicy::is_trivial(Method* method) { + if (method->is_accessor() || + method->is_constant_getter()) { + return true; + } + return false; +} + +bool TieredThresholdPolicy::should_compile_at_level_simple(Method* method) { + if (TieredThresholdPolicy::is_trivial(method)) { + return true; + } +#if INCLUDE_JVMCI + if (UseJVMCICompiler) { + AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization); + if (comp != NULL && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) { + return true; + } + } +#endif + return false; +} + +CompLevel TieredThresholdPolicy::comp_level(Method* method) { + CompiledMethod *nm = method->code(); + if (nm != NULL && nm->is_in_use()) { + return (CompLevel)nm->comp_level(); + } + return CompLevel_none; +} + +void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) { + int invocation_count = mh->invocation_count(); + int backedge_count = mh->backedge_count(); + MethodData* mdh = mh->method_data(); + int mdo_invocations = 0, mdo_backedges = 0; + int mdo_invocations_start = 0, mdo_backedges_start = 0; + if (mdh != NULL) { + mdo_invocations = mdh->invocation_count(); + mdo_backedges = mdh->backedge_count(); + mdo_invocations_start = mdh->invocation_count_start(); + mdo_backedges_start = mdh->backedge_count_start(); + } + tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix, + invocation_count, backedge_count, prefix, + mdo_invocations, mdo_invocations_start, + mdo_backedges, mdo_backedges_start); + tty->print(" %smax levels=%d,%d", prefix, + mh->highest_comp_level(), mh->highest_osr_comp_level()); +} + +// Print an event. +void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh, + int bci, CompLevel level) { + bool inlinee_event = mh() != imh(); + + ttyLocker tty_lock; + tty->print("%lf: [", os::elapsedTime()); + + switch(type) { + case CALL: + tty->print("call"); + break; + case LOOP: + tty->print("loop"); + break; + case COMPILE: + tty->print("compile"); + break; + case REMOVE_FROM_QUEUE: + tty->print("remove-from-queue"); + break; + case UPDATE_IN_QUEUE: + tty->print("update-in-queue"); + break; + case REPROFILE: + tty->print("reprofile"); + break; + case MAKE_NOT_ENTRANT: + tty->print("make-not-entrant"); + break; + default: + tty->print("unknown"); + } + + tty->print(" level=%d ", level); + + ResourceMark rm; + char *method_name = mh->name_and_sig_as_C_string(); + tty->print("[%s", method_name); + if (inlinee_event) { + char *inlinee_name = imh->name_and_sig_as_C_string(); + tty->print(" [%s]] ", inlinee_name); + } + else tty->print("] "); + tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile), + CompileBroker::queue_size(CompLevel_full_optimization)); + + print_specific(type, mh, imh, bci, level); + + if (type != COMPILE) { + print_counters("", mh); + if (inlinee_event) { + print_counters("inlinee ", imh); + } + tty->print(" compilable="); + bool need_comma = false; + if (!mh->is_not_compilable(CompLevel_full_profile)) { + tty->print("c1"); + need_comma = true; + } + if (!mh->is_not_osr_compilable(CompLevel_full_profile)) { + if (need_comma) tty->print(","); + tty->print("c1-osr"); + need_comma = true; + } + if (!mh->is_not_compilable(CompLevel_full_optimization)) { + if (need_comma) tty->print(","); + tty->print("c2"); + need_comma = true; + } + if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) { + if (need_comma) tty->print(","); + tty->print("c2-osr"); + } + tty->print(" 
status="); + if (mh->queued_for_compilation()) { + tty->print("in-queue"); + } else tty->print("idle"); + } + tty->print_cr("]"); +} + +void TieredThresholdPolicy::initialize() { + int count = CICompilerCount; + bool c1_only = TieredStopAtLevel < CompLevel_full_optimization; +#ifdef _LP64 + // Turn on ergonomic compiler count selection + if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) { + FLAG_SET_DEFAULT(CICompilerCountPerCPU, true); + } + if (CICompilerCountPerCPU) { + // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n + int log_cpu = log2_int(os::active_processor_count()); + int loglog_cpu = log2_int(MAX2(log_cpu, 1)); + count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2); + // Make sure there is enough space in the code cache to hold all the compiler buffers + size_t c1_size = Compiler::code_buffer_size(); + size_t c2_size = C2Compiler::initial_code_buffer_size(); + size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3); + int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size; + if (count > max_count) { + // Lower the compiler count such that all buffers fit into the code cache + count = MAX2(max_count, c1_only ? 1 : 2); + } + FLAG_SET_ERGO(CICompilerCount, count); + } +#else + // On 32-bit systems, the number of compiler threads is limited to 3. + // On these systems, the virtual address space available to the JVM + // is usually limited to 2-4 GB (the exact value depends on the platform). + // As the compilers (especially C2) can consume a large amount of + // memory, scaling the number of compiler threads with the number of + // available cores can result in the exhaustion of the address space + /// available to the VM and thus cause the VM to crash. 
+ if (FLAG_IS_DEFAULT(CICompilerCount)) { + count = 3; + FLAG_SET_ERGO(CICompilerCount, count); + } +#endif + + if (c1_only) { + // No C2 compiler thread required + set_c1_count(count); + } else { + set_c1_count(MAX2(count / 3, 1)); + set_c2_count(MAX2(count - c1_count(), 1)); + } + assert(count == c1_count() + c2_count(), "inconsistent compiler thread count"); + + // Some inlining tuning +#ifdef X86 + if (FLAG_IS_DEFAULT(InlineSmallCode)) { + FLAG_SET_DEFAULT(InlineSmallCode, 2000); + } +#endif + +#if defined SPARC || defined AARCH64 + if (FLAG_IS_DEFAULT(InlineSmallCode)) { + FLAG_SET_DEFAULT(InlineSmallCode, 2500); + } +#endif + + set_increase_threshold_at_ratio(); + set_start_time(os::javaTimeMillis()); +} + +void TieredThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) { + if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) { + counter->set_carry_flag(); + } +} + +// Set carry flags on the counters if necessary +void TieredThresholdPolicy::handle_counter_overflow(Method* method) { + MethodCounters *mcs = method->method_counters(); + if (mcs != NULL) { + set_carry_if_necessary(mcs->invocation_counter()); + set_carry_if_necessary(mcs->backedge_counter()); + } + MethodData* mdo = method->method_data(); + if (mdo != NULL) { + set_carry_if_necessary(mdo->invocation_counter()); + set_carry_if_necessary(mdo->backedge_counter()); + } +} + +// Called with the queue locked and with at least one element +CompileTask* TieredThresholdPolicy::select_task(CompileQueue* compile_queue) { + CompileTask *max_blocking_task = NULL; + CompileTask *max_task = NULL; + Method* max_method = NULL; + jlong t = os::javaTimeMillis(); + // Iterate through the queue and find a method with a maximum rate. + for (CompileTask* task = compile_queue->first(); task != NULL;) { + CompileTask* next_task = task->next(); + Method* method = task->method(); + // If a method was unloaded or has been stale for some time, remove it from the queue. + // Blocking tasks and tasks submitted from whitebox API don't become stale + if (task->is_unloaded() || (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method))) { + if (!task->is_unloaded()) { + if (PrintTieredEvents) { + print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level()); + } + method->clear_queued_for_compilation(); + } + compile_queue->remove_and_mark_stale(task); + task = next_task; + continue; + } + update_rate(t, method); + if (max_task == NULL || compare_methods(method, max_method)) { + // Select a method with the highest rate + max_task = task; + max_method = method; + } + + if (task->is_blocking()) { + if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) { + max_blocking_task = task; + } + } + + task = next_task; + } + + if (max_blocking_task != NULL) { + // In blocking compilation mode, the CompileBroker will make + // compilations submitted by a JVMCI compiler thread non-blocking. These + // compilations should be scheduled after all blocking compilations + // to service non-compiler related compilations sooner and reduce the + // chance of such compilations timing out. 
+    max_task = max_blocking_task;
+    max_method = max_task->method();
+  }
+
+  if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
+      TieredStopAtLevel > CompLevel_full_profile &&
+      max_method != NULL && is_method_profiled(max_method)) {
+    max_task->set_comp_level(CompLevel_limited_profile);
+
+    if (CompileBroker::compilation_is_complete(max_method, max_task->osr_bci(), CompLevel_limited_profile)) {
+      if (PrintTieredEvents) {
+        print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+      }
+      compile_queue->remove_and_mark_stale(max_task);
+      max_method->clear_queued_for_compilation();
+      return NULL;
+    }
+
+    if (PrintTieredEvents) {
+      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
+    }
+  }
+
+  return max_task;
+}
+
+void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
+  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
+    if (PrintTieredEvents) {
+      methodHandle mh(sd->method());
+      print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
+    }
+    MethodData* mdo = sd->method()->method_data();
+    if (mdo != NULL) {
+      mdo->reset_start_counters();
+    }
+    if (sd->is_top()) break;
+  }
+}
+
+nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
+                                      int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) {
+  if (comp_level == CompLevel_none &&
+      JvmtiExport::can_post_interpreter_events() &&
+      thread->is_interp_only_mode()) {
+    return NULL;
+  }
+  if (ReplayCompiles) {
+    // Don't trigger other compiles in testing mode
+    return NULL;
+  }
+
+  handle_counter_overflow(method());
+  if (method() != inlinee()) {
+    handle_counter_overflow(inlinee());
+  }
+
+  if (PrintTieredEvents) {
+    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
+  }
+
+  if (bci == InvocationEntryBci) {
+    method_invocation_event(method, inlinee, comp_level, nm, thread);
+  } else {
+    // method == inlinee if the event originated in the main method
+    method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
+    // Check if event led to a higher level OSR compilation
+    CompLevel expected_comp_level = comp_level;
+    if (inlinee->is_not_osr_compilable(expected_comp_level)) {
+      // It's not possible to reach the expected level so fall back to simple.
+ expected_comp_level = CompLevel_simple; + } + nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false); + assert(osr_nm == NULL || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken"); + if (osr_nm != NULL) { + // Perform OSR with new nmethod + return osr_nm; + } + } + return NULL; +} + +// Check if the method can be compiled, change level if necessary +void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { + assert(level <= TieredStopAtLevel, "Invalid compilation level"); + if (level == CompLevel_none) { + return; + } + if (level == CompLevel_aot) { + if (mh->has_aot_code()) { + if (PrintTieredEvents) { + print_event(COMPILE, mh, mh, bci, level); + } + MutexLocker ml(Compile_lock); + NoSafepointVerifier nsv; + if (mh->has_aot_code() && mh->code() != mh->aot_code()) { + mh->aot_code()->make_entrant(); + if (mh->has_compiled_code()) { + mh->code()->make_not_entrant(); + } + MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + Method::set_code(mh, mh->aot_code()); + } + } + return; + } + + // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling + // in the interpreter and then compile with C2 (the transition function will request that, + // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with + // pure C1. + if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) { + if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) { + compile(mh, bci, CompLevel_simple, thread); + } + return; + } + if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) { + if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) { + nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false); + if (osr_nm != NULL && osr_nm->comp_level() > CompLevel_simple) { + // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted. + osr_nm->make_not_entrant(); + } + compile(mh, bci, CompLevel_simple, thread); + } + return; + } + if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) { + return; + } + if (!CompileBroker::compilation_is_in_queue(mh)) { + if (PrintTieredEvents) { + print_event(COMPILE, mh, mh, bci, level); + } + submit_compile(mh, bci, level, thread); + } +} + +// Update the rate and submit compile +void TieredThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { + int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count(); + update_rate(os::javaTimeMillis(), mh()); + CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread); +} + +// Print an event. +void TieredThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, + int bci, CompLevel level) { + tty->print(" rate="); + if (mh->prev_time() == 0) tty->print("n/a"); + else tty->print("%f", mh->rate()); + + tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback), + threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback)); + +} + +// update_rate() is called from select_task() while holding a compile queue lock. +void TieredThresholdPolicy::update_rate(jlong t, Method* m) { + // Skip update if counters are absent. + // Can't allocate them since we are holding compile queue lock. 
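+  // Example of the rate math below (a sketch; the in-line comments suggest the
+  // default TieredRateUpdateMinTime=1 and TieredRateUpdateMaxTime=25, both ms):
+  //   prev_event_count=1000, event_count=1600, delta_t=40
+  //   => rate = 600 / 40 = 15 events/ms,
+  // while a queued method that saw no events for more than 25 ms has its
+  // rate zeroed instead.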
+  if (m->method_counters() == NULL) return;
+
+  if (is_old(m)) {
+    // We don't remove old methods from the queue,
+    // so we can just zero the rate.
+    m->set_rate(0);
+    return;
+  }
+
+  // We don't update the rate if we've just come out of a safepoint.
+  // delta_s is the time since last safepoint in milliseconds.
+  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
+  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
+  // How many events were there since the last time?
+  int event_count = m->invocation_count() + m->backedge_count();
+  int delta_e = event_count - m->prev_event_count();
+
+  // We should be running for at least 1ms.
+  if (delta_s >= TieredRateUpdateMinTime) {
+    // And we must've taken the previous point at least 1ms before.
+    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
+      m->set_prev_time(t);
+      m->set_prev_event_count(event_count);
+      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
+    } else {
+      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
+        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
+        m->set_rate(0);
+      }
+    }
+  }
+}
+
+// Check if this method has been stale for a given number of milliseconds.
+// See select_task().
+bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
+  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
+  jlong delta_t = t - m->prev_time();
+  if (delta_t > timeout && delta_s > timeout) {
+    int event_count = m->invocation_count() + m->backedge_count();
+    int delta_e = event_count - m->prev_event_count();
+    // Return true if there were no events.
+    return delta_e == 0;
+  }
+  return false;
+}
+
+// We don't remove old methods from the compile queue even if they have
+// very low activity. See select_task().
+bool TieredThresholdPolicy::is_old(Method* method) {
+  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
+}
+
+double TieredThresholdPolicy::weight(Method* method) {
+  return (double)(method->rate() + 1) *
+         (method->invocation_count() + 1) * (method->backedge_count() + 1);
+}
+
+// Apply heuristics and return true if x should be compiled before y
+bool TieredThresholdPolicy::compare_methods(Method* x, Method* y) {
+  if (x->highest_comp_level() > y->highest_comp_level()) {
+    // recompilation after deopt
+    return true;
+  } else
+    if (x->highest_comp_level() == y->highest_comp_level()) {
+      if (weight(x) > weight(y)) {
+        return true;
+      }
+    }
+  return false;
+}
+
+// Is method profiled enough?
+bool TieredThresholdPolicy::is_method_profiled(Method* method) {
+  MethodData* mdo = method->method_data();
+  if (mdo != NULL) {
+    int i = mdo->invocation_count_delta();
+    int b = mdo->backedge_count_delta();
+    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
+  }
+  return false;
+}
+
+double TieredThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
+  double queue_size = CompileBroker::queue_size(level);
+  int comp_count = compiler_count(level);
+  double k = queue_size / (feedback_k * comp_count) + 1;
+
+  // Increase C1 compile threshold when the code cache is filled more
+  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
+  // The main intention is to keep enough free space for C2 compiled code
+  // to achieve peak performance if the code cache is under stress.
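+  // E.g. (a sketch, assuming the default Tier3LoadFeedback=5 and two C1
+  // compiler threads): a level 3 queue holding 20 tasks gives
+  //   k = 20 / (5 * 2) + 1 = 3,
+  // i.e. every Tier3 threshold is effectively tripled while the backlog lasts.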
+  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
+    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
+    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
+      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
+    }
+  }
+  return k;
+}
+
+// Call and loop predicates determine whether a transition to a higher
+// compilation level should be performed (pointers to predicate functions
+// are passed to common()).
+// Tier?LoadFeedback is basically a coefficient that determines how many
+// methods per compiler thread can be in the queue before the threshold
+// values double.
+bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
+  switch(cur_level) {
+  case CompLevel_aot: {
+    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+    return loop_predicate_helper<CompLevel_aot>(i, b, k, method);
+  }
+  case CompLevel_none:
+  case CompLevel_limited_profile: {
+    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
+  }
+  case CompLevel_full_profile: {
+    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
+    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
+  }
+  default:
+    return true;
+  }
+}
+
+bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
+  switch(cur_level) {
+  case CompLevel_aot: {
+    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+    return call_predicate_helper<CompLevel_aot>(i, b, k, method);
+  }
+  case CompLevel_none:
+  case CompLevel_limited_profile: {
+    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
+    return call_predicate_helper<CompLevel_none>(i, b, k, method);
+  }
+  case CompLevel_full_profile: {
+    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
+  }
+  default:
+    return true;
+  }
+}
+
+// Determine if a method is mature.
+bool TieredThresholdPolicy::is_mature(Method* method) {
+  if (should_compile_at_level_simple(method)) return true;
+  MethodData* mdo = method->method_data();
+  if (mdo != NULL) {
+    int i = mdo->invocation_count();
+    int b = mdo->backedge_count();
+    double k = ProfileMaturityPercentage / 100.0;
+    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
+           loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
+  }
+  return false;
+}
+
+// If a method is old enough and is still in the interpreter we would want to
+// start profiling without waiting for the compiled method to arrive.
+// We also take the load on the compilers into account.
+bool TieredThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
+  if (cur_level == CompLevel_none &&
+      CompileBroker::queue_size(CompLevel_full_optimization) <=
+      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
+    int i = method->invocation_count();
+    int b = method->backedge_count();
+    double k = Tier0ProfilingStartPercentage / 100.0;
+    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
+  }
+  return false;
+}
+
+// Inlining control: if we're compiling a profiled method with C1 and the callee
+// is known to have OSRed in a C2 version, don't inline it.
+bool TieredThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) { + CompLevel comp_level = (CompLevel)env->comp_level(); + if (comp_level == CompLevel_full_profile || + comp_level == CompLevel_limited_profile) { + return callee->highest_osr_comp_level() == CompLevel_full_optimization; + } + return false; +} + +// Create MDO if necessary. +void TieredThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) { + if (mh->is_native() || + mh->is_abstract() || + mh->is_accessor() || + mh->is_constant_getter()) { + return; + } + if (mh->method_data() == NULL) { + Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR); + } +} + + +/* + * Method states: + * 0 - interpreter (CompLevel_none) + * 1 - pure C1 (CompLevel_simple) + * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile) + * 3 - C1 with full profiling (CompLevel_full_profile) + * 4 - C2 (CompLevel_full_optimization) + * + * Common state transition patterns: + * a. 0 -> 3 -> 4. + * The most common path. But note that even in this straightforward case + * profiling can start at level 0 and finish at level 3. + * + * b. 0 -> 2 -> 3 -> 4. + * This case occurs when the load on C2 is deemed too high. So, instead of transitioning + * into state 3 directly and over-profiling while a method is in the C2 queue we transition to + * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. + * + * c. 0 -> (3->2) -> 4. + * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough + * to enable the profiling to fully occur at level 0. In this case we change the compilation level + * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster + * without full profiling while c2 is compiling. + * + * d. 0 -> 3 -> 1 or 0 -> 2 -> 1. + * After a method was once compiled with C1 it can be identified as trivial and be compiled to + * level 1. These transition can also occur if a method can't be compiled with C2 but can with C1. + * + * e. 0 -> 4. + * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter) + * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because + * the compiled version already exists). + * + * Note that since state 0 can be reached from any other state via deoptimization different loops + * are possible. + * + */ + +// Common transition function. Given a predicate determines if a method should transition to another level. +CompLevel TieredThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) { + CompLevel next_level = cur_level; + int i = method->invocation_count(); + int b = method->backedge_count(); + + if (should_compile_at_level_simple(method)) { + next_level = CompLevel_simple; + } else { + switch(cur_level) { + default: break; + case CompLevel_aot: { + // If we were at full profile level, would we switch to full opt? + if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { + next_level = CompLevel_full_optimization; + } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } + } + break; + case CompLevel_none: + // If we were at full profile level, would we switch to full opt? 
+ if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { + next_level = CompLevel_full_optimization; + } else if ((this->*p)(i, b, cur_level, method)) { +#if INCLUDE_JVMCI + if (EnableJVMCI && UseJVMCICompiler) { + // Since JVMCI takes a while to warm up, its queue inevitably backs up during + // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root + // compilation method and all potential inlinees have mature profiles (which + // includes type profiling). If it sees immature profiles, JVMCI's inliner + // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to + // exploring/inlining too many graphs). Since a rewrite of the inliner is + // in progress, we simply disable the dialing back heuristic for now and will + // revisit this decision once the new inliner is completed. + next_level = CompLevel_full_profile; + } else +#endif + { + // C1-generated fully profiled code is about 30% slower than the limited profile + // code that has only invocation and backedge counters. The observation is that + // if C2 queue is large enough we can spend too much time in the fully profiled code + // while waiting for C2 to pick the method from the queue. To alleviate this problem + // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long + // we choose to compile a limited profiled version and then recompile with full profiling + // when the load on C2 goes down. + if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { + next_level = CompLevel_limited_profile; + } else { + next_level = CompLevel_full_profile; + } + } + } + break; + case CompLevel_limited_profile: + if (is_method_profiled(method)) { + // Special case: we got here because this method was fully profiled in the interpreter. + next_level = CompLevel_full_optimization; + } else { + MethodData* mdo = method->method_data(); + if (mdo != NULL) { + if (mdo->would_profile()) { + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } + } else { + next_level = CompLevel_full_optimization; + } + } else { + // If there is no MDO we need to profile + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } + } + } + break; + case CompLevel_full_profile: + { + MethodData* mdo = method->method_data(); + if (mdo != NULL) { + if (mdo->would_profile()) { + int mdo_i = mdo->invocation_count_delta(); + int mdo_b = mdo->backedge_count_delta(); + if ((this->*p)(mdo_i, mdo_b, cur_level, method)) { + next_level = CompLevel_full_optimization; + } + } else { + next_level = CompLevel_full_optimization; + } + } + } + break; + } + } + return MIN2(next_level, (CompLevel)TieredStopAtLevel); +} + +// Determine if a method should be compiled with a normal entry point at a different level. 
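+// For example: if a hot loop has already been OSR-compiled at level 4 while
+// the method entry is still at level 3, call_event below returns level 4 for
+// the whole method, so future invocations enter compiled code directly instead
+// of OSR-ing on every call (this restates the equalization rule in the comment
+// inside call_event; a reading aid, not additional policy).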
+CompLevel TieredThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread* thread) { + CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), + common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true)); + CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level); + + // If the OSR method level is greater than the regular method level, the levels should be + // equalized by raising the regular method level in order to avoid OSRs during each + // invocation of the method. + if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) { + MethodData* mdo = method->method_data(); + guarantee(mdo != NULL, "MDO should not be NULL"); + if (mdo->invocation_count() >= 1) { + next_level = CompLevel_full_optimization; + } + } else { + next_level = MAX2(osr_level, next_level); + } + return next_level; +} + +// Determine if we should do an OSR compilation of a given method. +CompLevel TieredThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) { + CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true); + if (cur_level == CompLevel_none) { + // If there is a live OSR method that means that we deopted to the interpreter + // for the transition. + CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level); + if (osr_level > CompLevel_none) { + return osr_level; + } + } + return next_level; +} + +bool TieredThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) { + if (UseAOT) { + if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) { + // If the current level is full profile or interpreter and we're switching to any other level, + // re-activate the AOT code first so that we won't waste time overprofiling. + compile(mh, InvocationEntryBci, CompLevel_aot, thread); + // Fall through for JIT compilation. + } + if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) { + // If the next level is limited profile, use the AOT code (if there is any), + // since it's essentially the same thing. + compile(mh, InvocationEntryBci, CompLevel_aot, thread); + // No need to JIT, we're done. + return true; + } + } + return false; +} + + +// Handle the invocation event. +void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, + CompLevel level, CompiledMethod* nm, JavaThread* thread) { + if (should_create_mdo(mh(), level)) { + create_mdo(mh, thread); + } + CompLevel next_level = call_event(mh(), level, thread); + if (next_level != level) { + if (maybe_switch_to_aot(mh, level, next_level, thread)) { + // No JITting necessary + return; + } + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } + } +} + +// Handle the back branch event. Notice that we can compile the method +// with a regular entry from here.
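+// For example (an informal sketch, not from the original comments): if a long-running loop +// keeps tripping its backedge threshold, loop_event() picks an OSR level for the method +// containing the loop, and the code below may additionally recompile the enclosing method with +// a regular entry so that subsequent invocations no longer need to OSR at all.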
+void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, + int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) { + if (should_create_mdo(mh(), level)) { + create_mdo(mh, thread); + } + // Check if an MDO should be created for the inlined method + if (should_create_mdo(imh(), level)) { + create_mdo(imh, thread); + } + + if (is_compilation_enabled()) { + CompLevel next_osr_level = loop_event(imh(), level, thread); + CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); + // At the very least, compile the OSR version + if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) { + compile(imh, bci, next_osr_level, thread); + } + + // Use the loop event as an opportunity to also check whether there have + // been enough calls. + CompLevel cur_level, next_level; + if (mh() != imh()) { // If there is an enclosing method + if (level == CompLevel_aot) { + // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling. + if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread); + } + } else { + // Current loop event level is not AOT + guarantee(nm != NULL, "Should have nmethod here"); + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level, thread); + + if (max_osr_level == CompLevel_full_optimization) { + // The inlinee OSRed to full opt, so we need to modify the enclosing method to avoid deopts + bool make_not_entrant = false; + if (nm->is_osr_method()) { + // This is an OSR method; just make it not entrant and recompile later if needed + make_not_entrant = true; + } else { + if (next_level != CompLevel_full_optimization) { + // next_level is not full opt, so we need to recompile the + // enclosing method without the inlinee + cur_level = CompLevel_none; + make_not_entrant = true; + } + } + if (make_not_entrant) { + if (PrintTieredEvents) { + int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci; + print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); + } + nm->make_not_entrant(); + } + } + // Fix up next_level if necessary to avoid deopts + if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { + next_level = CompLevel_full_profile; + } + if (cur_level != next_level) { + if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } + } + } + } else { + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level, thread); + if (next_level != cur_level) { + if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } + } + } + } +} + +#endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/compiler/tieredThresholdPolicy.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/compiler/tieredThresholdPolicy.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,278 @@ +/* + * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation.
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP +#define SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP + +#include "code/nmethod.hpp" +#include "compiler/compilationPolicy.hpp" +#include "oops/methodData.hpp" +#include "utilities/globalDefinitions.hpp" + +#ifdef TIERED + +class CompileTask; +class CompileQueue; +/* + * The system supports 5 execution levels: + * * level 0 - interpreter + * * level 1 - C1 with full optimization (no profiling) + * * level 2 - C1 with invocation and backedge counters + * * level 3 - C1 with full profiling (level 2 + MDO) + * * level 4 - C2 + * + * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters + * (invocation counters and backedge counters). The frequency of these notifications is + * different at each level. These notifications are used by the policy to decide what transition + * to make. + * + * Execution starts at level 0 (interpreter), then the policy can decide either to compile the + * method at level 3 or level 2. The decision is based on the following factors: + * 1. The length of the C2 queue determines the next level. The observation is that level 2 + * is generally faster than level 3 by about 30%, therefore we would want to minimize the time + * a method spends at level 3. We should only spend the time at level 3 that is necessary to get + * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to + * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile + * request makes its way through the long queue. When the load on C2 recedes we are going to + * recompile at level 3 and start gathering profiling information. + * 2. The length of the C1 queue is used to dynamically adjust the thresholds, so as to introduce + * additional filtering if the compiler is overloaded. The rationale is that by the time a + * method gets compiled it can become unused, so it doesn't make sense to put too much onto the + * queue. + * + * After profiling is completed at level 3 the transition is made to level 4. Again, the length + * of the C2 queue is used as a feedback to adjust the thresholds. + * + * After the first C1 compile some basic information is determined about the code, such as the + * number of blocks and the number of loops. Based on that it can be decided that a method + * is trivial and that compiling it with C1 will yield the same code as C2 would. In this case the + * method is compiled at level 1 instead of 4. + * + * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of + * the code and the C2 queue is sufficiently small, we can decide to start profiling in the + * interpreter (and continue profiling in the compiled code once the level 3 version arrives).
+ * If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2 + * version is compiled instead in order to run faster while waiting for the level 4 version. + * + * Compile queues are implemented as priority queues - for each method in the queue we compute + * the event rate (the number of invocation and backedge counter increments per unit of time). + * When getting an element off the queue we pick the one with the largest rate. Maintaining the + * rate also allows us to remove stale methods (the ones that got on the queue but stopped + * being used shortly after that). +*/ + +/* Command line options: + * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method + * invocation and backedge notifications. Basically, every n-th invocation or backedge, a mutator thread + * makes a call into the runtime. + * + * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control + * compilation thresholds. + * Level 2 thresholds are not used and are provided for option-compatibility and potential future use. + * Other thresholds work as follows: + * + * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when + * the following predicate is true (X is the level): + * + * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s), + * + * where $i$ is the number of method invocations, $b$ the number of backedges and $s$ is the scaling + * coefficient that will be discussed further. + * The intuition is to equalize the time that is spent profiling each method. + * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be + * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come + * from Method* and for the 3->4 transition they come from the MDO (since profiled invocations are + * counted separately). Finally, if a method does not contain anything worth profiling, a transition + * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than + * what is specified by Tier4InvocationThreshold). + * + * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates. + * + * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending + * on the compiler load. The scaling coefficients are computed as follows: + * + * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1, + * + * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X + * is the number of level X compiler threads. + * + * Basically, these parameters describe how many methods should be in the compile queue + * per compiler thread before the scaling coefficient increases by one. + * + * This feedback provides the mechanism to automatically control the flow of compilation requests + * depending on the machine speed, mutator load and other external factors.
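+ * + * As a worked example of the scaling above (hypothetical numbers, not asserted defaults): + * with TierXLoadFeedback = 3 and two compiler threads at level X, a queue of 12 outstanding + * requests gives s = 12 / (3 * 2) + 1 = 3, so each threshold at that level is effectively + * tripled until the queue drains.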
+ * + * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop. + * Consider the following observation: a method compiled with full profiling (level 3) + * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO). + * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue + * gets congested and the 3->4 transition is delayed. While the method is in the C2 queue it continues + * executing at level 3 for a much longer time than required by the predicate, and at suboptimal speed. + * The idea is to dynamically change the behavior of the system in such a way that if a substantial + * load on C2 is detected we first do the 0->2 transition, allowing a method to run faster, + * and then, when the load decreases, allow 2->3 transitions. + * + * Tier3Delay* parameters control this switching mechanism. + * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy + * no longer does 0->3 transitions but does 0->2 transitions instead. + * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue + * per compiler thread falls below the specified amount. + * The hysteresis is necessary to avoid jitter. + * + * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue. + * Basically, since we use the event rate d(i + b)/dt as the priority value when selecting a method to + * compile from the compile queue, we can also detect stale methods for which the rate has been + * 0 for some time in the same iteration. Stale methods can appear in the queue when an application + * abruptly changes its behavior. + * + * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking + * to a given level. For example, it's useful to set TieredStopAtLevel = 1 in order to compile everything + * with pure C1. + * + * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the + * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the + * method is still not ready. We can even go directly from level 0 to 4 if C1 doesn't produce a compiled + * version in time. This reduces the overall time to transition to level 4 and decreases the startup time. + * Note that this behavior is also guarded by the Tier3Delay mechanism: when the C2 queue is too long + * there is no reason to start profiling prematurely. + * + * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation. + * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered + * to be zero if no events occurred in TieredRateUpdateMaxTime. + */ + +class TieredThresholdPolicy : public CompilationPolicy { + jlong _start_time; + int _c1_count, _c2_count; + + // Check if the counter is big enough and set carry (effectively infinity). + inline void set_carry_if_necessary(InvocationCounter *counter); + // Set carry flags in the counters (in Method* and MDO). + inline void handle_counter_overflow(Method* method); + // Call and loop predicates determine whether a transition to a higher compilation + // level should be performed (pointers to predicate functions are passed to common()). + // Predicates also take compiler load into account. + typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method); + bool call_predicate(int i, int b, CompLevel cur_level, Method* method); + bool loop_predicate(int i, int b, CompLevel cur_level, Method* method); + // Common transition function. Given a predicate, determines if a method should transition to another level. + CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false); + // Transition functions.
+ // call_event determines if a method should be compiled at a different + // level with a regular invocation entry. + CompLevel call_event(Method* method, CompLevel cur_level, JavaThread* thread); + // loop_event checks if a method should be OSR compiled at a different + // level. + CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread); + void print_counters(const char* prefix, const methodHandle& mh); + // Has a method been around for long? + // We don't remove old methods from the compile queue even if they have + // very low activity (see select_task()). + inline bool is_old(Method* method); + // Has a given method been inactive for a given number of milliseconds? + // If so, we would remove it from the queue (see select_task()). + inline bool is_stale(jlong t, jlong timeout, Method* m); + // Compute the weight of the method for the compilation scheduling + inline double weight(Method* method); + // Apply heuristics and return true if x should be compiled before y + inline bool compare_methods(Method* x, Method* y); + // Compute the event rate for a given method. The rate is the number of events (invocations + backedges) + // per millisecond. + inline void update_rate(jlong t, Method* m); + // Compute the threshold scaling coefficient + inline double threshold_scale(CompLevel level, int feedback_k); + // If a method is old enough and is still in the interpreter we would want to + // start profiling without waiting for the compiled method to arrive. This function + // determines whether we should do that. + inline bool should_create_mdo(Method* method, CompLevel cur_level); + // Create MDO if necessary. + void create_mdo(const methodHandle& mh, JavaThread* thread); + // Is the method profiled enough? + bool is_method_profiled(Method* method); + + double _increase_threshold_at_ratio; + + bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread); + + int c1_count() const { return _c1_count; } + int c2_count() const { return _c2_count; } + void set_c1_count(int x) { _c1_count = x; } + void set_c2_count(int x) { _c2_count = x; } + + enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; + void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); + // Print policy-specific information if necessary + void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); + // Check if the method can be compiled, change level if necessary + void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); + // Submit a given method for compilation + void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); + // Simple methods are as good compiled with C1 as with C2. + // This function tells whether a given method is such a method. + inline static bool is_trivial(Method* method); + // Force method to be compiled at CompLevel_simple? + inline static bool should_compile_at_level_simple(Method* method); + + // Predicate helpers are used by .*_predicate() methods as well as others. + // They check the given counter values, multiplied by the scale, against the thresholds. + template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method); + template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
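+ + // For illustration (informal and simplified, not the verbatim implementation): for + // level == CompLevel_full_profile the call predicate essentially evaluates + // i >= Tier4InvocationThreshold * scale || + // (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale), + // matching the predicate described in the file header comment, applied to the MDO counters.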
+ + // Get a compilation level for a given method. + static CompLevel comp_level(Method* method); + void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, + CompLevel level, CompiledMethod* nm, JavaThread* thread); + void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, + int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread); + + void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } + void set_start_time(jlong t) { _start_time = t; } + jlong start_time() const { return _start_time; } + +public: + TieredThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { } + virtual int compiler_count(CompLevel comp_level) { + if (is_c1_compile(comp_level)) return c1_count(); + if (is_c2_compile(comp_level)) return c2_count(); + return 0; + } + virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); } + virtual void do_safepoint_work() { } + virtual void delay_compilation(Method* method) { } + virtual void disable_compilation(Method* method) { } + virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); + virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, + int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread); + // Select task is called by CompileBroker. We should return a task or NULL. + virtual CompileTask* select_task(CompileQueue* compile_queue); + // Tell the runtime if we think a given method is adequately profiled. + virtual bool is_mature(Method* method); + // Initialize: set compiler thread count + virtual void initialize(); + virtual bool should_not_inline(ciEnv* env, ciMethod* callee); +}; + +#endif // TIERED + +#endif // SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/epsilon/epsilonArguments.cpp --- a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -45,13 +45,25 @@ FLAG_SET_DEFAULT(ExitOnOutOfMemoryError, true); } + // Warn users that a non-resizable heap might be better for some configurations. + // We are not adjusting the heap size by ourselves, because it affects startup time. + if (InitialHeapSize != MaxHeapSize) { + log_warning(gc)("Consider setting -Xms equal to -Xmx to avoid resizing hiccups"); + } + + // Warn users that AlwaysPreTouch might be better for some configurations. + // We are not turning this on by ourselves, because it affects startup time.
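+ // For illustration only (hypothetical command line, not part of this change): a latency-oriented + // Epsilon setup would typically pin the heap size and pre-touch it up front, e.g. + // java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xms4g -Xmx4g -XX:+AlwaysPreTouch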
+ if (FLAG_IS_DEFAULT(AlwaysPreTouch) && !AlwaysPreTouch) { + log_warning(gc)("Consider enabling -XX:+AlwaysPreTouch to avoid memory commit hiccups"); + } + if (EpsilonMaxTLABSize < MinTLABSize) { - warning("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize); + log_warning(gc)("EpsilonMaxTLABSize < MinTLABSize, adjusting it to " SIZE_FORMAT, MinTLABSize); EpsilonMaxTLABSize = MinTLABSize; } if (!EpsilonElasticTLAB && EpsilonElasticTLABDecay) { - warning("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled"); + log_warning(gc)("Disabling EpsilonElasticTLABDecay because EpsilonElasticTLAB is disabled"); FLAG_SET_DEFAULT(EpsilonElasticTLABDecay, false); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1Analytics.cpp --- a/src/hotspot/share/gc/g1/g1Analytics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1Analytics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -78,6 +78,8 @@ _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _prev_collection_pause_end_ms(0.0), _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), + _concurrent_refine_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), + _logged_cards_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _cost_per_logged_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), @@ -102,6 +104,10 @@ int index = MIN2(ParallelGCThreads - 1, 7u); _rs_length_diff_seq->add(rs_length_diff_defaults[index]); + // Start with inverse of maximum STW cost. + _concurrent_refine_rate_ms_seq->add(1/cost_per_logged_card_ms_defaults[0]); + // Some applications have very low rates for logging cards. + _logged_cards_rate_ms_seq->add(0.0); _cost_per_logged_card_ms_seq->add(cost_per_logged_card_ms_defaults[index]); _cost_scan_hcc_seq->add(0.0); _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]); @@ -159,6 +165,14 @@ (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms; } +void G1Analytics::report_concurrent_refine_rate_ms(double cards_per_ms) { + _concurrent_refine_rate_ms_seq->add(cards_per_ms); +} + +void G1Analytics::report_logged_cards_rate_ms(double cards_per_ms) { + _logged_cards_rate_ms_seq->add(cards_per_ms); +} + void G1Analytics::report_cost_per_logged_card_ms(double cost_per_logged_card_ms) { _cost_per_logged_card_ms_seq->add(cost_per_logged_card_ms); } @@ -223,6 +237,14 @@ return get_new_prediction(_alloc_rate_ms_seq); } +double G1Analytics::predict_concurrent_refine_rate_ms() const { + return get_new_prediction(_concurrent_refine_rate_ms_seq); +} + +double G1Analytics::predict_logged_cards_rate_ms() const { + return get_new_prediction(_logged_cards_rate_ms_seq); +} + double G1Analytics::predict_cost_per_logged_card_ms() const { return get_new_prediction(_cost_per_logged_card_ms_seq); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1Analytics.hpp --- a/src/hotspot/share/gc/g1/g1Analytics.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1Analytics.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -46,6 +46,8 @@ double _prev_collection_pause_end_ms; TruncatedSeq* _rs_length_diff_seq; + TruncatedSeq* _concurrent_refine_rate_ms_seq; + TruncatedSeq* _logged_cards_rate_ms_seq; TruncatedSeq* _cost_per_logged_card_ms_seq; TruncatedSeq* _cost_scan_hcc_seq; TruncatedSeq* _young_cards_per_entry_ratio_seq; @@ -99,6 +101,8 @@ void report_concurrent_mark_remark_times_ms(double ms); void 
report_concurrent_mark_cleanup_times_ms(double ms); void report_alloc_rate_ms(double alloc_rate); + void report_concurrent_refine_rate_ms(double cards_per_ms); + void report_logged_cards_rate_ms(double cards_per_ms); void report_cost_per_logged_card_ms(double cost_per_logged_card_ms); void report_cost_scan_hcc(double cost_scan_hcc); void report_cost_per_remset_card_ms(double cost_per_remset_card_ms, bool for_young_gc); @@ -116,6 +120,8 @@ double predict_alloc_rate_ms() const; int num_alloc_rate_ms() const; + double predict_concurrent_refine_rate_ms() const; + double predict_logged_cards_rate_ms() const; double predict_cost_per_logged_card_ms() const; double predict_scan_hcc_ms() const; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -4076,7 +4076,7 @@ Atomic::add(r->rem_set()->occupied_locked(), &_rs_length); if (!is_young) { - g1h->_hot_card_cache->reset_card_counts(r); + g1h->hot_card_cache()->reset_card_counts(r); } if (!evacuation_failed) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1CollectedHeap.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -129,7 +129,6 @@ }; class G1CollectedHeap : public CollectedHeap { - friend class G1FreeCollectionSetTask; friend class VM_CollectForMetadataAllocation; friend class VM_G1CollectForAllocation; friend class VM_G1CollectFull; @@ -1138,7 +1137,7 @@ return _reserved.contains(addr); } - G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; } + G1HotCardCache* hot_card_cache() const { return _hot_card_cache; } G1CardTable* card_table() const { return _card_table; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1CollectionSet.cpp --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -409,7 +409,7 @@ guarantee(target_pause_time_ms > 0.0, "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms); - size_t pending_cards = _policy->pending_cards(); + size_t pending_cards = _policy->pending_cards_at_gc_start(); double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards); double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -412,6 +412,22 @@ dcqs.notify_if_necessary(); } +G1ConcurrentRefine::RefinementStats G1ConcurrentRefine::total_refinement_stats() const { + struct CollectData : public ThreadClosure { + Tickspan _total_time; + size_t _total_cards; + CollectData() : _total_time(), _total_cards(0) {} + virtual void do_thread(Thread* t) { + G1ConcurrentRefineThread* crt = static_cast<G1ConcurrentRefineThread*>(t); + _total_time += crt->total_refinement_time(); + _total_cards += crt->total_refined_cards(); + } + } collector; + // Cast away const so we can call non-modifying closure on threads.
+ const_cast<G1ConcurrentRefine*>(this)->threads_do(&collector); + return RefinementStats(collector._total_time, collector._total_cards); +} + size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const { Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id); return activation_level(thresholds); @@ -432,7 +448,8 @@ } } -bool G1ConcurrentRefine::do_refinement_step(uint worker_id) { +bool G1ConcurrentRefine::do_refinement_step(uint worker_id, + size_t* total_refined_cards) { G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); size_t curr_cards = dcqs.num_cards(); @@ -448,5 +465,6 @@ // Process the next buffer, if there are enough left. return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(), - deactivation_threshold(worker_id)); + deactivation_threshold(worker_id), + total_refined_cards); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/ticks.hpp" // Forward decl class G1ConcurrentRefine; @@ -118,11 +119,22 @@ // Adjust refinement thresholds based on work done during the pause and the goal time. void adjust(double logged_cards_scan_time, size_t processed_logged_cards, double goal_ms); + struct RefinementStats { + Tickspan _time; + size_t _cards; + RefinementStats(Tickspan time, size_t cards) : _time(time), _cards(cards) {} + }; + + RefinementStats total_refinement_stats() const; + // Cards in the dirty card queue set. size_t activation_threshold(uint worker_id) const; size_t deactivation_threshold(uint worker_id) const; - // Perform a single refinement step. Called by the refinement threads when woken up. - bool do_refinement_step(uint worker_id); + + // Perform a single refinement step; called by the refinement + // threads. Returns true if there was refinement work available. + // Increments *total_refined_cards. + bool do_refinement_step(uint worker_id, size_t* total_refined_cards); // Iterate over all concurrent refinement threads applying the given closure. void threads_do(ThreadClosure *tc); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -37,6 +37,8 @@ ConcurrentGCThread(), _vtime_start(0.0), _vtime_accum(0.0), + _total_refinement_time(), + _total_refined_cards(0), _worker_id(worker_id), _active(false), _monitor(NULL), @@ -101,11 +103,12 @@ break; } - size_t buffers_processed = 0; log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT, _worker_id, _cr->activation_threshold(_worker_id), G1BarrierSet::dirty_card_queue_set().num_cards()); + size_t start_total_refined_cards = _total_refined_cards; // For logging. + { SuspendibleThreadSetJoiner sts_join; @@ -115,20 +118,22 @@ continue; // Re-check for termination after yield delay. } - if (!_cr->do_refinement_step(_worker_id)) { - break; + Ticks start_time = Ticks::now(); + if (!_cr->do_refinement_step(_worker_id, &_total_refined_cards)) { + break; // No cards to process.
} - ++buffers_processed; + _total_refinement_time += (Ticks::now() - start_time); } } deactivate(); log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT - ", current: " SIZE_FORMAT ", buffers processed: " - SIZE_FORMAT, + ", current: " SIZE_FORMAT ", refined cards: " + SIZE_FORMAT ", total refined cards: " SIZE_FORMAT, _worker_id, _cr->deactivation_threshold(_worker_id), G1BarrierSet::dirty_card_queue_set().num_cards(), - buffers_processed); + _total_refined_cards - start_total_refined_cards, + _total_refined_cards); if (os::supports_vtime()) { _vtime_accum = (os::elapsedVTime() - _vtime_start); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp --- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -26,6 +26,7 @@ #define SHARE_GC_G1_G1CONCURRENTREFINETHREAD_HPP #include "gc/shared/concurrentGCThread.hpp" +#include "utilities/ticks.hpp" // Forward Decl. class G1ConcurrentRefine; @@ -38,6 +39,10 @@ double _vtime_start; // Initial virtual time. double _vtime_accum; // Accumulated virtual time. + + Tickspan _total_refinement_time; + size_t _total_refined_cards; + uint _worker_id; bool _active; @@ -61,6 +66,9 @@ // Activate this thread. void activate(); + Tickspan total_refinement_time() const { return _total_refinement_time; } + size_t total_refined_cards() const { return _total_refined_cards; } + // Total virtual time so far. double vtime_accum() { return _vtime_accum; } }; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp --- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -37,6 +37,7 @@ #include "runtime/atomic.hpp" #include "runtime/flags/flagSetting.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/os.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" @@ -62,6 +63,9 @@ } } +// Assumed to be zero by concurrent threads. +static uint par_ids_start() { return 0; } + G1DirtyCardQueueSet::G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator) : PtrQueueSet(allocator), @@ -73,15 +77,16 @@ _process_completed_buffers(false), _max_cards(MaxCardsUnlimited), _max_cards_padding(0), - _free_ids(0, num_par_ids()), - _processed_buffers_mut(0), - _processed_buffers_rs_thread(0) + _free_ids(par_ids_start(), num_par_ids()), + _mutator_refined_cards_counters(NEW_C_HEAP_ARRAY(size_t, num_par_ids(), mtGC)) { + ::memset(_mutator_refined_cards_counters, 0, num_par_ids() * sizeof(size_t)); _all_active = true; } G1DirtyCardQueueSet::~G1DirtyCardQueueSet() { abandon_completed_buffers(); + FREE_C_HEAP_ARRAY(size_t, _mutator_refined_cards_counters); } // Determines how many mutator threads can process the buffers in parallel. 
@@ -89,6 +94,14 @@ return (uint)os::initial_active_processor_count(); } +size_t G1DirtyCardQueueSet::total_mutator_refined_cards() const { + size_t sum = 0; + for (uint i = 0; i < num_par_ids(); ++i) { + sum += _mutator_refined_cards_counters[i]; + } + return sum; +} + void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) { G1ThreadLocalData::dirty_card_queue(t).handle_zero_index(); } @@ -213,7 +226,9 @@ return result; } -bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node, uint worker_id) { +bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node, + uint worker_id, + size_t* total_refined_cards) { G1RemSet* rem_set = G1CollectedHeap::heap()->rem_set(); size_t size = buffer_size(); void** buffer = BufferNode::make_buffer_from_node(node); @@ -223,6 +238,7 @@ CardTable::CardValue* cp = static_cast(buffer[i]); rem_set->refine_card_concurrently(cp, worker_id); } + *total_refined_cards += (i - node->index()); node->set_index(i); return i == size; } @@ -260,25 +276,27 @@ bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) { uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id - bool result = refine_buffer(node, worker_id); + uint counter_index = worker_id - par_ids_start(); + size_t* counter = &_mutator_refined_cards_counters[counter_index]; + bool result = refine_buffer(node, worker_id, counter); _free_ids.release_par_id(worker_id); // release the id if (result) { assert_fully_consumed(node, buffer_size()); - Atomic::inc(&_processed_buffers_mut); } return result; } -bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, size_t stop_at) { +bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, + size_t stop_at, + size_t* total_refined_cards) { BufferNode* node = get_completed_buffer(stop_at); if (node == NULL) { return false; - } else if (refine_buffer(node, worker_id)) { + } else if (refine_buffer(node, worker_id, total_refined_cards)) { assert_fully_consumed(node, buffer_size()); // Done with fully processed buffer. deallocate_buffer(node); - Atomic::inc(&_processed_buffers_rs_thread); return true; } else { // Return partially processed buffer to the queue. diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp --- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -78,14 +78,15 @@ void abandon_completed_buffers(); - // Refine the cards in "node" from it's index to buffer_size. + // Refine the cards in "node" from its index to buffer_size. // Stops processing if SuspendibleThreadSet::should_yield() is true. // Returns true if the entire buffer was processed, false if there // is a pending yield request. The node's index is updated to exclude // the processed elements, e.g. up to the element before processing // stopped, or one past the last element if the entire buffer was - // processed. - bool refine_buffer(BufferNode* node, uint worker_id); + // processed. Increments *total_refined_cards by the number of cards + // processed and removed from the buffer. + bool refine_buffer(BufferNode* node, uint worker_id, size_t* total_refined_cards); bool mut_process_buffer(BufferNode* node); @@ -97,10 +98,9 @@ G1FreeIdSet _free_ids; - // The number of completed buffers processed by mutator and rs thread, - // respectively. - jint _processed_buffers_mut; - jint _processed_buffers_rs_thread; + // Array of cumulative dirty cards refined by mutator threads. 
+ // Array has an entry per id in _free_ids. + size_t* _mutator_refined_cards_counters; public: G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator); @@ -158,7 +158,12 @@ // Stops processing a buffer if SuspendibleThreadSet::should_yield(), // returning the incompletely processed buffer to the completed buffer // list, for later processing of the remainder. - bool refine_completed_buffer_concurrently(uint worker_id, size_t stop_at); + // + // Increments *total_refined_cards by the number of cards processed and + // removed from the buffer. + bool refine_completed_buffer_concurrently(uint worker_id, + size_t stop_at, + size_t* total_refined_cards); // If a full collection is happening, reset partial logs, and release // completed ones: the full collection will make them all irrelevant. @@ -181,13 +186,8 @@ return _max_cards_padding; } - jint processed_buffers_mut() { - return _processed_buffers_mut; - } - jint processed_buffers_rs_thread() { - return _processed_buffers_rs_thread; - } - + // Total dirty cards refined by mutator threads. + size_t total_mutator_refined_cards() const; }; inline G1DirtyCardQueueSet* G1DirtyCardQueue::dirty_card_qset() const { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -114,8 +114,9 @@ hr->rem_set()->clear(); hr->clear_cardtable(); - if (_g1h->g1_hot_card_cache()->use_cache()) { - _g1h->g1_hot_card_cache()->reset_card_counts(hr); + G1HotCardCache* hcc = _g1h->hot_card_cache(); + if (hcc->use_cache()) { + hcc->reset_card_counts(hr); } } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1Policy.cpp --- a/src/hotspot/share/gc/g1/g1Policy.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1Policy.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -70,7 +70,11 @@ _free_regions_at_end_of_collection(0), _max_rs_length(0), _rs_length_prediction(0), - _pending_cards(0), + _pending_cards_at_gc_start(0), + _pending_cards_at_prev_gc_end(0), + _total_mutator_refined_cards(0), + _total_concurrent_refined_cards(0), + _total_concurrent_refinement_time(), _bytes_allocated_in_old_since_last_gc(0), _initial_mark_to_mixed(), _collection_set(NULL), @@ -442,6 +446,7 @@ collector_state()->set_in_young_only_phase(false); collector_state()->set_in_full_gc(true); _collection_set->clear_candidates(); + record_concurrent_refinement_data(true /* is_full_collection */); } void G1Policy::record_full_collection_end() { @@ -472,12 +477,67 @@ _survivor_surv_rate_group->reset(); update_young_list_max_and_target_length(); update_rs_length_prediction(); + _pending_cards_at_prev_gc_end = _g1h->pending_card_num(); _bytes_allocated_in_old_since_last_gc = 0; record_pause(FullGC, _full_collection_start_sec, end_sec); } +void G1Policy::record_concurrent_refinement_data(bool is_full_collection) { + _pending_cards_at_gc_start = _g1h->pending_card_num(); + + // Record info about concurrent refinement thread processing. + G1ConcurrentRefine* cr = _g1h->concurrent_refine(); + G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats(); + + Tickspan cr_time = cr_stats._time - _total_concurrent_refinement_time; + _total_concurrent_refinement_time = cr_stats._time; + + size_t cr_cards = cr_stats._cards - _total_concurrent_refined_cards; + _total_concurrent_refined_cards = cr_stats._cards; + + // Don't update rate if full collection. 
We could be in an implicit full + // collection after a non-full collection failure, in which case there + // wasn't any mutator/cr-thread activity since last recording. And if + // we're in an explicit full collection, the time since the last GC can + // be arbitrarily short, so not a very good sample. Similarly, don't + // update the rate if the current sample is empty or time is zero. + if (!is_full_collection && (cr_cards > 0) && (cr_time > Tickspan())) { + double rate = cr_cards / (cr_time.seconds() * MILLIUNITS); + _analytics->report_concurrent_refine_rate_ms(rate); + } + + // Record info about mutator thread processing. + G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); + size_t mut_total_cards = dcqs.total_mutator_refined_cards(); + size_t mut_cards = mut_total_cards - _total_mutator_refined_cards; + _total_mutator_refined_cards = mut_total_cards; + + // Record mutator's card logging rate. + // Don't update if full collection; see above. + if (!is_full_collection) { + size_t total_cards = _pending_cards_at_gc_start + cr_cards + mut_cards; + assert(_pending_cards_at_prev_gc_end <= total_cards, + "untracked cards: last pending: " SIZE_FORMAT + ", pending: " SIZE_FORMAT ", conc refine: " SIZE_FORMAT + ", mut refine: " SIZE_FORMAT, + _pending_cards_at_prev_gc_end, _pending_cards_at_gc_start, + cr_cards, mut_cards); + size_t logged_cards = total_cards - _pending_cards_at_prev_gc_end; + double logging_start_time = _analytics->prev_collection_pause_end_ms(); + double logging_end_time = Ticks::now().seconds() * MILLIUNITS; + double logging_time = logging_end_time - logging_start_time; + // Unlike above for conc-refine rate, here we should not require a + // non-empty sample, since an application could go some time with only + // young-gen or filtered-out writes. But we'll ignore unusually short + // sample periods, as they may just pollute the predictions. + if (logging_time > 1.0) { // Require > 1ms sample time. + _analytics->report_logged_cards_rate_ms(logged_cards / logging_time); + } + } +} + +void G1Policy::record_collection_pause_start(double start_time_sec) { // We only need to do this here as the policy will only be applied // to the GC we're about to start. So, no point in calculating this @@ -490,7 +550,8 @@ assert_used_and_recalculate_used_equal(_g1h); phase_times()->record_cur_collection_start_sec(start_time_sec); - _pending_cards = _g1h->pending_card_num(); + + record_concurrent_refinement_data(false /* is_full_collection */); _collection_set->reset_bytes_used_before(); _bytes_copied_during_gc = 0; @@ -744,7 +805,7 @@ // after the mixed gc phase. // During mixed gc we do not use them for young gen sizing.
if (this_pause_was_young_only) { - _analytics->report_pending_cards((double) _pending_cards); + _analytics->report_pending_cards((double) _pending_cards_at_gc_start); _analytics->report_rs_length((double) _max_rs_length); } } @@ -798,6 +859,7 @@ scan_logged_cards_time_goal_ms -= scan_hcc_time_ms; } + _pending_cards_at_prev_gc_end = _g1h->pending_card_num(); double const logged_cards_time = logged_cards_processing_time(); log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms", diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1Policy.hpp --- a/src/hotspot/share/gc/g1/g1Policy.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1Policy.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -100,7 +100,11 @@ size_t _rs_length_prediction; - size_t _pending_cards; + size_t _pending_cards_at_gc_start; + size_t _pending_cards_at_prev_gc_end; + size_t _total_mutator_refined_cards; + size_t _total_concurrent_refined_cards; + Tickspan _total_concurrent_refinement_time; // The amount of allocated bytes in old gen during the last mutator and the following // young GC phase. @@ -244,7 +248,15 @@ uint base_free_regions, double target_pause_time_ms) const; public: - size_t pending_cards() const { return _pending_cards; } + size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; } + + size_t total_concurrent_refined_cards() const { + return _total_concurrent_refined_cards; + } + + size_t total_mutator_refined_cards() const { + return _total_mutator_refined_cards; + } // Calculate the minimum number of old regions we'll add to the CSet // during a mixed GC. @@ -283,6 +295,9 @@ void record_pause(PauseKind kind, double start, double end); // Indicate that we aborted marking before doing any mixed GCs. void abort_time_to_mixed_tracking(); + + void record_concurrent_refinement_data(bool is_full_collection); + public: G1Policy(STWGCTimer* gc_timer); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1RemSet.cpp --- a/src/hotspot/share/gc/g1/g1RemSet.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -489,7 +489,6 @@ _scan_state(new G1RemSetScanState()), _prev_period_summary(), _g1h(g1h), - _num_conc_refined_cards(0), _ct(ct), _g1p(_g1h->policy()), _hot_card_cache(hot_card_cache) { @@ -1377,7 +1376,6 @@ G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id); if (r->oops_on_memregion_seq_iterate_careful(dirty_region, &conc_refine_cl) != NULL) { - _num_conc_refined_cards++; // Unsynchronized update, only used for logging. return; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1RemSet.hpp --- a/src/hotspot/share/gc/g1/g1RemSet.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -61,7 +61,6 @@ G1RemSetSummary _prev_period_summary; G1CollectedHeap* _g1h; - size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator. G1CardTable* _ct; G1Policy* _g1p; @@ -125,8 +124,6 @@ // Print accumulated summary info from the last time called. void print_periodic_summary_info(const char* header, uint period_count); - size_t num_conc_refined_cards() const { return _num_conc_refined_cards; } - // Rebuilds the remembered set by scanning from bottom to TARS for all regions // using the given work gang. 
void rebuild_rem_set(G1ConcurrentMark* cm, WorkGang* workers, uint worker_id_offset); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1RemSetSummary.cpp --- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -27,6 +27,7 @@ #include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefineThread.hpp" #include "gc/g1/g1DirtyCardQueue.hpp" +#include "gc/g1/g1Policy.hpp" #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1RemSetSummary.hpp" #include "gc/g1/g1YoungRemSetSamplingThread.hpp" @@ -53,18 +54,17 @@ }; void G1RemSetSummary::update() { - _num_conc_refined_cards = _rem_set->num_conc_refined_cards(); - G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set(); - _num_processed_buf_mutator = dcqs.processed_buffers_mut(); - _num_processed_buf_rs_threads = dcqs.processed_buffers_rs_thread(); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + const G1Policy* policy = g1h->policy(); + _total_mutator_refined_cards = policy->total_mutator_refined_cards(); + _total_concurrent_refined_cards = policy->total_concurrent_refined_cards(); _num_coarsenings = HeapRegionRemSet::n_coarsenings(); - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - G1ConcurrentRefine* cg1r = g1h->concurrent_refine(); if (_rs_threads_vtimes != NULL) { GetRSThreadVTimeClosure p(this); - cg1r->threads_do(&p); + g1h->concurrent_refine()->threads_do(&p); } set_sampling_thread_vtime(g1h->sampling_thread()->vtime_accum()); } @@ -83,9 +83,8 @@ G1RemSetSummary::G1RemSetSummary() : _rem_set(NULL), - _num_conc_refined_cards(0), - _num_processed_buf_mutator(0), - _num_processed_buf_rs_threads(0), + _total_mutator_refined_cards(0), + _total_concurrent_refined_cards(0), _num_coarsenings(0), _num_vtimes(G1ConcurrentRefine::max_num_threads()), _rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)), @@ -96,9 +95,8 @@ G1RemSetSummary::G1RemSetSummary(G1RemSet* rem_set) : _rem_set(rem_set), - _num_conc_refined_cards(0), - _num_processed_buf_mutator(0), - _num_processed_buf_rs_threads(0), + _total_mutator_refined_cards(0), + _total_concurrent_refined_cards(0), _num_coarsenings(0), _num_vtimes(G1ConcurrentRefine::max_num_threads()), _rs_threads_vtimes(NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC)), @@ -114,12 +112,10 @@ assert(other != NULL, "just checking"); assert(_num_vtimes == other->_num_vtimes, "just checking"); - _num_conc_refined_cards = other->num_conc_refined_cards(); + _total_mutator_refined_cards = other->total_mutator_refined_cards(); + _total_concurrent_refined_cards = other->total_concurrent_refined_cards(); - _num_processed_buf_mutator = other->num_processed_buf_mutator(); - _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads(); - - _num_coarsenings = other->_num_coarsenings; + _num_coarsenings = other->num_coarsenings(); memcpy(_rs_threads_vtimes, other->_rs_threads_vtimes, sizeof(double) * _num_vtimes); @@ -130,10 +126,8 @@ assert(other != NULL, "just checking"); assert(_num_vtimes == other->_num_vtimes, "just checking"); - _num_conc_refined_cards = other->num_conc_refined_cards() - _num_conc_refined_cards; - - _num_processed_buf_mutator = other->num_processed_buf_mutator() - _num_processed_buf_mutator; - _num_processed_buf_rs_threads = other->num_processed_buf_rs_threads() - _num_processed_buf_rs_threads; + _total_mutator_refined_cards = other->total_mutator_refined_cards() - _total_mutator_refined_cards; + _total_concurrent_refined_cards = 
other->total_concurrent_refined_cards() - _total_concurrent_refined_cards; _num_coarsenings = other->num_coarsenings() - _num_coarsenings; @@ -356,16 +350,15 @@ void G1RemSetSummary::print_on(outputStream* out) { out->print_cr(" Recent concurrent refinement statistics"); - out->print_cr(" Processed " SIZE_FORMAT " cards concurrently", num_conc_refined_cards()); - out->print_cr(" Of " SIZE_FORMAT " completed buffers:", num_processed_buf_total()); - out->print_cr(" " SIZE_FORMAT_W(8) " (%5.1f%%) by concurrent RS threads.", - num_processed_buf_total(), - percent_of(num_processed_buf_rs_threads(), num_processed_buf_total())); + out->print_cr(" Of " SIZE_FORMAT " refined cards:", total_refined_cards()); + out->print_cr(" " SIZE_FORMAT_W(8) " (%5.1f%%) by concurrent refinement threads.", + total_concurrent_refined_cards(), + percent_of(total_concurrent_refined_cards(), total_refined_cards())); out->print_cr(" " SIZE_FORMAT_W(8) " (%5.1f%%) by mutator threads.", - num_processed_buf_mutator(), - percent_of(num_processed_buf_mutator(), num_processed_buf_total())); + total_mutator_refined_cards(), + percent_of(total_mutator_refined_cards(), total_refined_cards())); out->print_cr(" Did " SIZE_FORMAT " coarsenings.", num_coarsenings()); - out->print_cr(" Concurrent RS threads times (s)"); + out->print_cr(" Concurrent refinement threads times (s)"); out->print(" "); for (uint i = 0; i < _num_vtimes; i++) { out->print(" %5.2f", rs_thread_vtime(i)); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/g1RemSetSummary.hpp --- a/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -38,9 +38,8 @@ G1RemSet* _rem_set; - size_t _num_conc_refined_cards; - size_t _num_processed_buf_mutator; - size_t _num_processed_buf_rs_threads; + size_t _total_mutator_refined_cards; + size_t _total_concurrent_refined_cards; size_t _num_coarsenings; @@ -76,20 +75,16 @@ return _sampling_thread_vtime; } - size_t num_conc_refined_cards() const { - return _num_conc_refined_cards; + size_t total_mutator_refined_cards() const { + return _total_mutator_refined_cards; } - size_t num_processed_buf_mutator() const { - return _num_processed_buf_mutator; + size_t total_concurrent_refined_cards() const { + return _total_concurrent_refined_cards; } - size_t num_processed_buf_rs_threads() const { - return _num_processed_buf_rs_threads; - } - - size_t num_processed_buf_total() const { - return num_processed_buf_mutator() + num_processed_buf_rs_threads(); + size_t total_refined_cards() const { + return total_mutator_refined_cards() + total_concurrent_refined_cards(); } size_t num_coarsenings() const { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/heapRegionRemSet.hpp --- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -186,10 +186,6 @@ _collision_list_next(NULL) {} - inline void add_card_work(CardIdx_t from_card, bool par); - - inline void add_reference_work(OopOrNarrowOopStar from, bool par); - public: // We need access in order to union things into the base table. 
BitMap* bm() { return &_bm; } @@ -206,12 +202,8 @@ inline void add_reference(OopOrNarrowOopStar from); - inline void seq_add_reference(OopOrNarrowOopStar from); - inline void add_card(CardIdx_t from_card_index); - void seq_add_card(CardIdx_t from_card_index); - // (Destructively) union the bitmap of the current table into the given // bitmap (which is assumed to be of the same size.) void union_bitmap_into(BitMap* bm) { @@ -381,12 +373,6 @@ _state = Complete; } - // Used in the sequential case. - void add_reference(OopOrNarrowOopStar from) { - add_reference(from, 0); - } - - // Used in the parallel case. void add_reference(OopOrNarrowOopStar from, uint tid) { RemSetState state = _state; if (state == Untracked) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp --- a/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -35,20 +35,13 @@ _other_regions.iterate(cl); } -inline void PerRegionTable::add_card_work(CardIdx_t from_card, bool par) { - if (!_bm.at(from_card)) { - if (par) { - if (_bm.par_set_bit(from_card)) { - Atomic::inc(&_occupied); - } - } else { - _bm.set_bit(from_card); - _occupied++; - } +inline void PerRegionTable::add_card(CardIdx_t from_card_index) { + if (_bm.par_set_bit(from_card_index)) { + Atomic::inc(&_occupied); } } -inline void PerRegionTable::add_reference_work(OopOrNarrowOopStar from, bool par) { +inline void PerRegionTable::add_reference(OopOrNarrowOopStar from) { // Must make this robust in case "from" is not in "_hr", because of // concurrency. @@ -58,26 +51,10 @@ // and adding a bit to the new table is never incorrect. if (loc_hr->is_in_reserved(from)) { CardIdx_t from_card = OtherRegionsTable::card_within_region(from, loc_hr); - add_card_work(from_card, par); + add_card(from_card); } } -inline void PerRegionTable::add_card(CardIdx_t from_card_index) { - add_card_work(from_card_index, /*parallel*/ true); -} - -inline void PerRegionTable::seq_add_card(CardIdx_t from_card_index) { - add_card_work(from_card_index, /*parallel*/ false); -} - -inline void PerRegionTable::add_reference(OopOrNarrowOopStar from) { - add_reference_work(from, /*parallel*/ true); -} - -inline void PerRegionTable::seq_add_reference(OopOrNarrowOopStar from) { - add_reference_work(from, /*parallel*/ false); -} - inline void PerRegionTable::init(HeapRegion* hr, bool clear_links_to_all_list) { if (clear_links_to_all_list) { set_next(NULL); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shared/c2/barrierSetC2.hpp --- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -264,7 +264,7 @@ virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const; // Support for GC barriers emitted during parsing - virtual bool has_load_barriers() const { return false; } + virtual bool has_load_barrier_nodes() const { return false; } virtual bool is_gc_barrier_node(Node* node) const { return false; } virtual Node* step_over_gc_barrier(Node* c) const { return c; } virtual Node* step_over_gc_barrier_ctrl(Node* c) const { return c; } @@ -287,13 +287,9 @@ virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; } virtual bool has_special_unique_user(const Node* node) const { return false; } - virtual bool needs_anti_dependence_check(const Node* node) const { return true; } - - virtual 
void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const { } enum CompilePhase { BeforeOptimize, - BeforeLateInsertion, BeforeMacroExpand, BeforeCodeGen }; @@ -320,6 +316,10 @@ virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; } virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; } virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { return false; } + + virtual void late_barrier_analysis() const { } + virtual int estimate_stub_size() const { return 0; } + virtual void emit_stubs(CodeBuffer& cb) const { } }; #endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -103,7 +103,7 @@ static const TypeFunc* write_ref_field_pre_entry_Type(); static const TypeFunc* shenandoah_clone_barrier_Type(); static const TypeFunc* shenandoah_load_reference_barrier_Type(); - virtual bool has_load_barriers() const { return true; } + virtual bool has_load_barrier_nodes() const { return true; } // This is the entry-point for the backend to perform accesses through the Access API. virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -66,9 +66,12 @@ size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0; size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste); - log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: " - SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M", - free_target / M, actual_free / M, max_cset / M, min_garbage / M); + log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: " + SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target), + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), + byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), + byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage)); // Better select garbage-first regions QuickSort::sort(data, (int)size, compare_by_garbage, false); @@ -119,8 +122,9 @@ // anything else. 
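The recurring change in these Shenandoah hunks swaps hard-coded "/ M" scaling for the byte_size_in_proper_unit / proper_unit_for_byte_size pair, so the printed value and its unit always agree and small values no longer round down to "0M". A self-contained sketch of the idea; the real helpers live in HotSpot's utilities/globalDefinitions.hpp and differ in detail, so the thresholds here are illustrative.

#include <cstddef>
#include <cstdio>

static size_t in_proper_unit(size_t s) {
  const size_t K = 1024, M = K * K, G = K * M;
  if (s >= 10 * G) return s / G;
  if (s >= 10 * M) return s / M;
  if (s >= 10 * K) return s / K;
  return s;
}

static const char* proper_unit(size_t s) {
  const size_t K = 1024, M = K * K, G = K * M;
  if (s >= 10 * G) return "G";
  if (s >= 10 * M) return "M";
  if (s >= 10 * K) return "K";
  return "B";
}

int main() {
  const size_t available = 3 * 1024 * 1024; // 3 MB free
  // The two helpers must be called on the same value, which is exactly
  // how every converted log_info() call in these hunks pairs them.
  printf("Free (%zu%s)\n", in_proper_unit(available), proper_unit(available)); // "Free (3072K)"
  return 0;
}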
size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)", - available / M, min_threshold / M); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; } @@ -129,8 +133,10 @@ if (_gc_times_learned < max_learn) { size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; if (available < init_threshold) { - log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)", - _gc_times_learned + 1, max_learn, available / M, init_threshold / M); + log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", + _gc_times_learned + 1, max_learn, + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); return true; } } @@ -154,10 +160,15 @@ double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last; if (average_gc > allocation_headroom / allocation_rate) { - log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)", - average_gc * 1000, allocation_rate / M, allocation_headroom / M); - log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M", - available / M, spike_headroom / M, penalties / M, allocation_headroom / M); + log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)", + average_gc * 1000, + byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); + log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), + byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); return true; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -59,21 +59,24 @@ size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)", - available / M, min_threshold / M); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); 
return true; } if (available < threshold_bytes_allocated) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is lower than allocated recently (" SIZE_FORMAT "M)", - available / M, threshold_bytes_allocated / M); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is lower than allocated recently (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); return true; } size_t bytes_allocated = heap->bytes_allocated_since_gc_start(); if (bytes_allocated > threshold_bytes_allocated) { - log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "M) is larger than allocation threshold (" SIZE_FORMAT "M)", - bytes_allocated / M, threshold_bytes_allocated / M); + log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), + byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); return true; } @@ -86,8 +89,9 @@ // Do not select too large CSet that would overflow the available free space size_t max_cset = actual_free * 3 / 4; - log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M", - actual_free / M, max_cset / M); + log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), + byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -60,8 +60,9 @@ size_t available = MAX2(capacity / 100 * ShenandoahEvacReserve, actual_free); size_t max_cset = (size_t)(available / ShenandoahEvacWaste); - log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M", - actual_free / M, max_cset / M); + log_info(gc, ergo)("CSet Selection. 
Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), + byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -57,8 +57,9 @@ size_t threshold_available = capacity / 100 * ShenandoahFreeThreshold; if (available < threshold_available) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below free threshold (" SIZE_FORMAT "M)", - available / M, threshold_available / M); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below free threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); return true; } return ShenandoahHeuristics::should_start_gc(); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahTraversalHeuristics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -100,9 +100,12 @@ size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0; size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste); - log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: " - SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M", - free_target / M, actual_free / M, max_cset / M, min_garbage / M); + log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: " + SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target), + byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), + byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), + byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage)); // Better select garbage-first regions, and then older ones QuickSort::sort(data, (int) cnt, compare_by_garbage_then_alloc_seq_ascending, false); @@ -190,8 +193,9 @@ // anything else. size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)", - available / M, min_threshold / M); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; } @@ -200,8 +204,10 @@ if (_gc_times_learned < max_learn) { size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; if (available < init_threshold) { - log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". 
Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)", - _gc_times_learned + 1, max_learn, available / M, init_threshold / M); + log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", + _gc_times_learned + 1, max_learn, + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); return true; } } @@ -223,10 +229,15 @@ double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last; if (average_gc > allocation_headroom / allocation_rate) { - log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)", - average_gc * 1000, allocation_rate / M, allocation_headroom / M); - log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M", - available / M, spike_headroom / M, penalties / M, allocation_headroom / M); + log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)", + average_gc * 1000, + byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); + log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), + byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); return true; } else if (ShenandoahHeuristics::should_start_gc()) { return true; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -69,7 +69,8 @@ // enough, but we also do not want to steal too much CPU from the concurrently running // application. Using 1/4 of available threads for concurrent GC seems a good // compromise here. - if (FLAG_IS_DEFAULT(ConcGCThreads)) { + bool ergo_conc = FLAG_IS_DEFAULT(ConcGCThreads); + if (ergo_conc) { FLAG_SET_DEFAULT(ConcGCThreads, MAX2(1, os::processor_count() / 4)); } @@ -82,7 +83,8 @@ // that will overwhelm the OS scheduler. Using 1/2 of available threads seems to be a fair // compromise here. Due to implementation constraints, it should not be lower than // the number of concurrent threads. - if (FLAG_IS_DEFAULT(ParallelGCThreads)) { + bool ergo_parallel = FLAG_IS_DEFAULT(ParallelGCThreads); + if (ergo_parallel) { FLAG_SET_DEFAULT(ParallelGCThreads, MAX2(1, os::processor_count() / 2)); } @@ -90,9 +92,21 @@ vm_exit_during_initialization("Shenandoah expects ParallelGCThreads > 0, check -XX:ParallelGCThreads=#"); } + // Make sure ergonomic decisions do not break the thread count invariants. + // This may happen when user overrides one of the flags, but not the other. + // When that happens, we want to adjust the setting that was set ergonomically. 
if (ParallelGCThreads < ConcGCThreads) { - warning("Shenandoah expects ConcGCThreads <= ParallelGCThreads, adjusting ParallelGCThreads automatically"); - FLAG_SET_DEFAULT(ParallelGCThreads, ConcGCThreads); + if (ergo_conc && !ergo_parallel) { + FLAG_SET_DEFAULT(ConcGCThreads, ParallelGCThreads); + } else if (!ergo_conc && ergo_parallel) { + FLAG_SET_DEFAULT(ParallelGCThreads, ConcGCThreads); + } else if (ergo_conc && ergo_parallel) { + // Should not happen, check the ergonomic computation above. Fail with relevant error. + vm_exit_during_initialization("Shenandoah thread count ergonomic error"); + } else { + // User settings error, report and ask user to rectify. + vm_exit_during_initialization("Shenandoah expects ConcGCThreads <= ParallelGCThreads, check -XX:ParallelGCThreads, -XX:ConcGCThreads"); + } } if (FLAG_IS_DEFAULT(ParallelRefProcEnabled)) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -319,13 +319,20 @@ }; void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) { - WorkGang* workers = _heap->workers(); - bool is_par = workers->active_workers() > 1; + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); + + ShenandoahGCPhase phase(root_phase); + #if COMPILER2_OR_JVMCI DerivedPointerTable::clear(); #endif + + WorkGang* workers = _heap->workers(); + bool is_par = workers->active_workers() > 1; + ShenandoahUpdateThreadRootsTask task(is_par, root_phase); workers->run_task(&task); + #if COMPILER2_OR_JVMCI DerivedPointerTable::update_pointers(); #endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -491,8 +491,12 @@ size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes(); size_t free = capacity() - used(); - ls.print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ", - total_free / M, mutator_count(), max / K, max_humongous / K); + ls.print("Free: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s, Max humongous: " SIZE_FORMAT "%s, ", + byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), + mutator_count(), + byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), + byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous) + ); size_t frag_ext; if (free > 0) { @@ -525,8 +529,10 @@ } } - ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K", - total_free / M, collector_count(), max / K); + ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s", + byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), + collector_count(), + byte_size_in_proper_unit(max), proper_unit_for_byte_size(max)); } } } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Wed Oct 16 15:31:05 2019 +0200 @@ 
-524,10 +524,14 @@ void ShenandoahHeap::print_on(outputStream* st) const { st->print_cr("Shenandoah Heap"); - st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used", - max_capacity() / K, committed() / K, used() / K); - st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions", - num_regions(), ShenandoahHeapRegion::region_size_bytes() / K); + st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used", + byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()), + byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()), + byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); + st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions", + num_regions(), + byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()), + proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes())); st->print("Status: "); if (has_forwarded_objects()) st->print("has forwarded objects, "); @@ -961,7 +965,7 @@ ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh); ShenandoahHeapRegion* r; while ((r =_cs->claim_next()) != NULL) { - assert(r->has_live(), "all-garbage regions are reclaimed early"); + assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->region_number()); _sh->marked_object_iterate(r, &cl); if (ShenandoahPacing) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -527,29 +527,35 @@ size_t region_size; if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) { if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) { - err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number " - "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).", - max_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K); + err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " + "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), + MIN_NUM_REGIONS, + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); } if (ShenandoahMinRegionSize < MIN_REGION_SIZE) { - err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).", - ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K); + err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), + byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); } if (ShenandoahMinRegionSize < MinTLABSize) { - err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).", - ShenandoahMinRegionSize/K, MinTLABSize/K); + err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), +
byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); } if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) { - err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).", - ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K); + err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize), + byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message); } if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) { - err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).", - ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K); + err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), + byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message); } @@ -563,19 +569,23 @@ } else { if (ShenandoahHeapRegionSize > max_heap_size / MIN_NUM_REGIONS) { - err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number " - "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).", - max_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K); + err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " + "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), + MIN_NUM_REGIONS, + byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message); } if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) { - err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).", - ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K); + err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize), + byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message); } if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) { - err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).", - ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K); + err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).", + byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize), + byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message); } region_size = ShenandoahHeapRegionSize; diff -r 54c1ba464b78 -r 
28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -198,7 +198,7 @@ // Macro-properties: bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; } - bool is_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); } + bool is_stw_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); } RegionState state() const { return _state; } int state_ordinal() const { return region_state_to_ordinal(_state); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -186,8 +186,9 @@ // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. assert (immediate_garbage <= total_garbage, - "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "M vs " SIZE_FORMAT "M", - immediate_garbage / M, total_garbage / M); + "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s", + byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), + byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); size_t immediate_percent = total_garbage == 0 ? 0 : (immediate_garbage * 100 / total_garbage); @@ -196,12 +197,16 @@ collection_set->update_region_status(); size_t cset_percent = total_garbage == 0 ? 0 : (collection_set->garbage() * 100 / total_garbage); - log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions", - collection_set->garbage() / M, cset_percent, collection_set->live_data() / M, collection_set->count()); + log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions", + byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()), + cset_percent, + byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()), + collection_set->count()); } - log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions", - immediate_garbage / M, immediate_percent, immediate_regions); + log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions", + byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), + immediate_percent, immediate_regions); } void ShenandoahHeuristics::record_gc_start() { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -337,7 +337,7 @@ // Can move the region, and this is not the humongous region. Humongous // moves are special cased here, because their moves are handled separately. 
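Because humongous objects occupy whole regions of one contiguous heap reservation, the special-cased humongous move in the mark-compact hunks reduces to a single word copy from the old region span to the forwarded one. An illustrative sketch, with memmove standing in for Copy::aligned_conjoint_words (both tolerate overlapping source and destination, which matters when an object slides only a few regions):

#include <cstddef>
#include <cstring>

// Illustrative region descriptor; in the real heap all regions carve up
// one contiguous reservation, so a run of regions is contiguous memory.
struct RegionSketch {
  char* bottom;
};

static void move_humongous(RegionSketch* regions,
                           size_t old_start, size_t new_start,
                           size_t num_regions, size_t region_bytes) {
  memmove(regions[new_start].bottom,   // destination: forwarded start region
          regions[old_start].bottom,   // source: current start region
          num_regions * region_bytes); // the object spans whole regions
}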
- if (from_region->is_move_allowed() && !from_region->is_humongous()) break; + if (from_region->is_stw_move_allowed() && !from_region->is_humongous()) break; from_region = _heap_regions.next(); } @@ -345,7 +345,7 @@ if (from_region != NULL) { assert(slice != NULL, "sanity"); assert(!from_region->is_humongous(), "this path cannot handle humongous regions"); - assert(from_region->is_empty() || from_region->is_move_allowed(), "only regions that can be moved in mark-compact"); + assert(from_region->is_empty() || from_region->is_stw_move_allowed(), "only regions that can be moved in mark-compact"); slice->add_region(from_region); } @@ -419,7 +419,7 @@ continue; } - if (r->is_humongous_start() && r->is_move_allowed()) { + if (r->is_humongous_start() && r->is_stw_move_allowed()) { // From-region candidate: movable humongous region oop old_obj = oop(r->bottom()); size_t words_size = old_obj->size(); @@ -761,7 +761,7 @@ size_t new_start = heap->heap_region_index_containing(old_obj->forwardee()); size_t new_end = new_start + num_regions - 1; assert(old_start != new_start, "must be real move"); - assert (r->is_move_allowed(), "should be movable"); + assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number()); Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(), heap->get_region(new_start)->bottom(), diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -70,9 +70,12 @@ restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "M, Free: " SIZE_FORMAT - "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx", - live / M, free / M, non_taxable / M, tax); + log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); } void ShenandoahPacer::setup_for_evac() { @@ -91,9 +94,12 @@ restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "M, Free: " SIZE_FORMAT - "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx", - used / M, free / M, non_taxable / M, tax); + log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); } void ShenandoahPacer::setup_for_updaterefs() { @@ -112,9 +118,12 @@ restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "M, Free: " SIZE_FORMAT - "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx", - used / M, free / M, non_taxable / M, tax); + log_info(gc, ergo)("Pacer for Update Refs. 
Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); } /* @@ -136,9 +145,12 @@ restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Traversal. Expected Live: " SIZE_FORMAT "M, Free: " SIZE_FORMAT - "M, Non-Taxable: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx", - live / M, free / M, non_taxable / M, tax); + log_info(gc, ergo)("Pacer for Traversal. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " + "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), + byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), + byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), + tax); } /* @@ -158,8 +170,9 @@ restart_with(initial, tax); - log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "M, Alloc Tax Rate: %.1fx", - initial / M, tax); + log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), + tax); } size_t ShenandoahPacer::update_and_get_progress_history() { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -205,8 +205,11 @@ void ShenandoahStrDedupQueue::print_statistics_impl() { Log(gc, stringdedup) log; log.debug(" Queue:"); - log.debug(" Total buffers: " SIZE_FORMAT " (" SIZE_FORMAT " K). " SIZE_FORMAT " buffers are on free list", - _total_buffers, (_total_buffers * sizeof(ShenandoahQueueBuffer) / K), _num_free_buffer); + log.debug(" Total buffers: " SIZE_FORMAT " (" SIZE_FORMAT " %s). 
" SIZE_FORMAT " buffers are on free list", + _total_buffers, + byte_size_in_proper_unit(_total_buffers * sizeof(ShenandoahQueueBuffer)), + proper_unit_for_byte_size(_total_buffers * sizeof(ShenandoahQueueBuffer)), + _num_free_buffer); } class VerifyQueueClosure : public OopClosure { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -366,8 +366,10 @@ // Rebuild free set free_set->rebuild(); - log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions", - collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count()); + log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions", + byte_size_in_proper_unit(collection_set->garbage()), proper_unit_for_byte_size(collection_set->garbage()), + byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()), + collection_set->count()); } void ShenandoahTraversalGC::init_traversal_collection() { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -686,13 +686,17 @@ _heap->heap_region_iterate(&cl); size_t heap_used = _heap->used(); guarantee(cl.used() == heap_used, - "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "K, regions-used = " SIZE_FORMAT "K", - label, heap_used/K, cl.used()/K); + "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s", + label, + byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), + byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used())); size_t heap_committed = _heap->committed(); guarantee(cl.committed() == heap_committed, - "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "K, regions-committed = " SIZE_FORMAT "K", - label, heap_committed/K, cl.committed()/K); + "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s", + label, + byte_size_in_exact_unit(heap_committed), proper_unit_for_byte_size(heap_committed), + byte_size_in_exact_unit(cl.committed()), proper_unit_for_byte_size(cl.committed())); } // Internal heap region checks diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -299,11 +299,11 @@ "Should internally-caused GCs invoke concurrent cycles, or go to" \ "stop-the-world (degenerated/full)?") \ \ - experimental(bool, ShenandoahHumongousMoves, true, \ + diagnostic(bool, ShenandoahHumongousMoves, true, \ "Allow moving humongous regions. This makes GC more resistant " \ "to external fragmentation that may otherwise fail other " \ "humongous allocations, at the expense of higher GC copying " \ - "costs.") \ + "costs. 
Currently affects stop-the-world (full) cycle only.") \ \ diagnostic(bool, ShenandoahOOMDuringEvacALot, false, \ "Simulate OOM during evacuation frequently.") \ \ @@ -314,9 +314,6 @@ diagnostic(bool, ShenandoahTerminationTrace, false, \ "Tracing task termination timings") \ \ - develop(bool, ShenandoahVerifyObjectEquals, false, \ - "Verify that == and != are not used on oops. Only in fastdebug") \ - \ diagnostic(bool, ShenandoahAlwaysPreTouch, false, \ "Pre-touch heap memory, overrides global AlwaysPreTouch") \ \ diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -22,451 +22,157 @@ */ #include "precompiled.hpp" -#include "opto/castnode.hpp" +#include "classfile/javaClasses.hpp" +#include "gc/z/c2/zBarrierSetC2.hpp" +#include "gc/z/zBarrierSet.hpp" +#include "gc/z/zBarrierSetAssembler.hpp" +#include "gc/z/zBarrierSetRuntime.hpp" +#include "opto/block.hpp" #include "opto/compile.hpp" -#include "opto/escape.hpp" #include "opto/graphKit.hpp" -#include "opto/loopnode.hpp" #include "opto/machnode.hpp" -#include "opto/macro.hpp" #include "opto/memnode.hpp" -#include "opto/movenode.hpp" #include "opto/node.hpp" -#include "opto/phase.hpp" -#include "opto/phaseX.hpp" +#include "opto/regalloc.hpp" #include "opto/rootnode.hpp" -#include "opto/type.hpp" -#include "utilities/copy.hpp" #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" -#include "gc/z/zBarrierSet.hpp" -#include "gc/z/c2/zBarrierSetC2.hpp" -#include "gc/z/zThreadLocalData.hpp" -#include "gc/z/zBarrierSetRuntime.hpp" -ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena) : - _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {} +class ZBarrierSetC2State : public ResourceObj { +private: + GrowableArray<ZLoadBarrierStubC2*>* _stubs; + Node_Array _live; -int ZBarrierSetC2State::load_barrier_count() const { - return _load_barrier_nodes->length(); -} +public: + ZBarrierSetC2State(Arena* arena) : + _stubs(new (arena) GrowableArray<ZLoadBarrierStubC2*>(arena, 8, 0, NULL)), + _live(arena) {} -void ZBarrierSetC2State::add_load_barrier_node(LoadBarrierNode * n) { - assert(!_load_barrier_nodes->contains(n), " duplicate entry in expand list"); - _load_barrier_nodes->append(n); -} + GrowableArray<ZLoadBarrierStubC2*>* stubs() { + return _stubs; + } -void ZBarrierSetC2State::remove_load_barrier_node(LoadBarrierNode * n) { - // this function may be called twice for a node so check - // that the node is in the array before attempting to remove it - if (_load_barrier_nodes->contains(n)) { - _load_barrier_nodes->remove(n); - } -} + RegMask* live(const Node* node) { + if (!node->is_Mach()) { + // Don't need liveness for non-MachNodes + return NULL; + } -LoadBarrierNode* ZBarrierSetC2State::load_barrier_node(int idx) const { - return _load_barrier_nodes->at(idx); -} + const MachNode* const mach = node->as_Mach(); + if (mach->barrier_data() != ZLoadBarrierStrong && + mach->barrier_data() != ZLoadBarrierWeak) { + // Don't need liveness data for nodes without barriers + return NULL; + } -void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const { - return new(comp_arena) ZBarrierSetC2State(comp_arena); -} + RegMask* live = (RegMask*)_live[node->_idx]; + if (live == NULL) { + live = new (Compile::current()->comp_arena()->Amalloc_D(sizeof(RegMask))) RegMask(); + _live.map(node->_idx, (Node*)live); + } -ZBarrierSetC2State* ZBarrierSetC2::state() const { + return live; + } +}; + 
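The ZBarrierSetC2State above pairs a stub list with a lazily populated per-node liveness cache: a RegMask is allocated the first time a barrier-carrying node asks for one, and barrier-free nodes pay nothing. The same shape with STL containers standing in for the arena-allocated HotSpot types:

#include <cstdint>
#include <unordered_map>

struct RegMaskSketch {          // stand-in for RegMask
  uint64_t bits = 0;
};

class BarrierStateSketch {
  std::unordered_map<unsigned, RegMaskSketch> _live; // node idx -> mask

public:
  RegMaskSketch* live(unsigned node_idx, bool node_has_barrier) {
    if (!node_has_barrier) {
      return nullptr;           // no liveness data for barrier-free nodes
    }
    // operator[] default-constructs on first use, mirroring the lazy
    // Amalloc_D allocation keyed by node->_idx in the class above.
    return &_live[node_idx];
  }
};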
+static ZBarrierSetC2State* barrier_set_state() { return reinterpret_cast<ZBarrierSetC2State*>(Compile::current()->barrier_set_state()); } -bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const { - // 1. This step follows potential oop projections of a load barrier before expansion - if (node->is_Proj()) { - node = node->in(0); - } - - // 2. This step checks for unexpanded load barriers - if (node->is_LoadBarrier()) { - return true; - } - - // 3. This step checks for the phi corresponding to an optimized load barrier expansion - if (node->is_Phi()) { - PhiNode* phi = node->as_Phi(); - Node* n = phi->in(1); - if (n != NULL && n->is_LoadBarrierSlowReg()) { - return true; - } - } - - return false; -} - -void ZBarrierSetC2::register_potential_barrier_node(Node* node) const { - if (node->is_LoadBarrier()) { - state()->add_load_barrier_node(node->as_LoadBarrier()); - } -} - -void ZBarrierSetC2::unregister_potential_barrier_node(Node* node) const { - if (node->is_LoadBarrier()) { - state()->remove_load_barrier_node(node->as_LoadBarrier()); - } -} - -void ZBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const { - // Remove useless LoadBarrier nodes - ZBarrierSetC2State* s = state(); - for (int i = s->load_barrier_count()-1; i >= 0; i--) { - LoadBarrierNode* n = s->load_barrier_node(i); - if (!useful.member(n)) { - unregister_potential_barrier_node(n); - } - } -} - -void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const { - if (node->is_LoadBarrier() && !node->as_LoadBarrier()->has_true_uses()) { - igvn->_worklist.push(node); - } -} - -const uint NoBarrier = 0; -const uint RequireBarrier = 1; -const uint WeakBarrier = 2; -const uint ExpandedBarrier = 4; - -static bool load_require_barrier(LoadNode* load) { return (load->barrier_data() & RequireBarrier) == RequireBarrier; } -static bool load_has_weak_barrier(LoadNode* load) { return (load->barrier_data() & WeakBarrier) == WeakBarrier; } -static bool load_has_expanded_barrier(LoadNode* load) { return (load->barrier_data() & ExpandedBarrier) == ExpandedBarrier; } -static void load_set_expanded_barrier(LoadNode* load) { return load->set_barrier_data(ExpandedBarrier); } - -static void load_set_barrier(LoadNode* load, bool weak) { - if (weak) { - load->set_barrier_data(RequireBarrier | WeakBarrier); - } else { - load->set_barrier_data(RequireBarrier); - } -} - -// == LoadBarrierNode == - -LoadBarrierNode::LoadBarrierNode(Compile* C, - Node* c, - Node* mem, - Node* val, - Node* adr, - bool weak) : - MultiNode(Number_of_Inputs), - _weak(weak) { - init_req(Control, c); - init_req(Memory, mem); - init_req(Oop, val); - init_req(Address, adr); - init_req(Similar, C->top()); - - init_class_id(Class_LoadBarrier); - BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - bs->register_potential_barrier_node(this); -} - -uint LoadBarrierNode::size_of() const { - return sizeof(*this); -} - -bool LoadBarrierNode::cmp(const Node& n) const { - ShouldNotReachHere(); - return false; -} - -const Type *LoadBarrierNode::bottom_type() const { - const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*))); - Node* in_oop = in(Oop); - floadbarrier[Control] = Type::CONTROL; - floadbarrier[Memory] = Type::MEMORY; - floadbarrier[Oop] = in_oop == NULL ? 
Type::TOP : in_oop->bottom_type(); - return TypeTuple::make(Number_of_Outputs, floadbarrier); -} - -const TypePtr* LoadBarrierNode::adr_type() const { - ShouldNotReachHere(); - return NULL; -} - -const Type *LoadBarrierNode::Value(PhaseGVN *phase) const { - const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*))); - const Type* val_t = phase->type(in(Oop)); - floadbarrier[Control] = Type::CONTROL; - floadbarrier[Memory] = Type::MEMORY; - floadbarrier[Oop] = val_t; - return TypeTuple::make(Number_of_Outputs, floadbarrier); -} - -bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n) { - if (phase != NULL) { - return phase->is_dominator(d, n); - } - - for (int i = 0; i < 10 && n != NULL; i++) { - n = IfNode::up_one_dom(n, linear_only); - if (n == d) { - return true; - } - } - - return false; -} - -LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) { - if (is_weak()) { - // Weak barriers can't be eliminated - return NULL; - } - - Node* val = in(LoadBarrierNode::Oop); - if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) { - LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier(); - assert(lb->in(Address) == in(Address), ""); - // Load barrier on Similar edge dominates so if it now has the Oop field it can replace this barrier. - if (lb->in(Oop) == in(Oop)) { - return lb; - } - // Follow chain of load barrier through Similar edges - while (!lb->in(Similar)->is_top()) { - lb = lb->in(Similar)->in(0)->as_LoadBarrier(); - assert(lb->in(Address) == in(Address), ""); - } - if (lb != in(Similar)->in(0)) { - return lb; - } - } - for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { - Node* u = val->fast_out(i); - if (u != this && u->is_LoadBarrier() && u->in(Oop) == val && u->as_LoadBarrier()->has_true_uses()) { - Node* this_ctrl = in(LoadBarrierNode::Control); - Node* other_ctrl = u->in(LoadBarrierNode::Control); - if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) { - return u->as_LoadBarrier(); - } - } - } - - if (can_be_eliminated()) { - return NULL; - } - - if (!look_for_similar) { - return NULL; +ZLoadBarrierStubC2* ZLoadBarrierStubC2::create(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) { + ZLoadBarrierStubC2* const stub = new (Compile::current()->comp_arena()) ZLoadBarrierStubC2(node, ref_addr, ref, tmp, weak); + if (!Compile::current()->in_scratch_emit_size()) { + barrier_set_state()->stubs()->append(stub); } - Node* addr = in(LoadBarrierNode::Address); - for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) { - Node* u = addr->fast_out(i); - if (u != this && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) { - Node* this_ctrl = in(LoadBarrierNode::Control); - Node* other_ctrl = u->in(LoadBarrierNode::Control); - if (is_dominator(phase, linear_only, other_ctrl, this_ctrl)) { - ResourceMark rm; - Unique_Node_List wq; - wq.push(in(LoadBarrierNode::Control)); - bool ok = true; - bool dom_found = false; - for (uint next = 0; next < wq.size(); ++next) { - Node *n = wq.at(next); - if (n->is_top()) { - return NULL; - } - assert(n->is_CFG(), ""); - if (n->is_SafePoint()) { - ok = false; - break; - } - if (n == u) { - dom_found = true; - continue; - } - if (n->is_Region()) { - for (uint i = 1; i < n->req(); i++) { - Node* m = n->in(i); - if (m != NULL) { - wq.push(m); - } - } - } else { - Node* m = n->in(0); - if (m != NULL) { - 
wq.push(m); - } - } - } - if (ok) { - assert(dom_found, ""); - return u->as_LoadBarrier(); - } - break; - } - } - } + return stub; +} + +ZLoadBarrierStubC2::ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) : + _node(node), + _ref_addr(ref_addr), + _ref(ref), + _tmp(tmp), + _weak(weak), + _entry(), + _continuation() { + assert_different_registers(ref, ref_addr.base()); + assert_different_registers(ref, ref_addr.index()); +} - return NULL; +Address ZLoadBarrierStubC2::ref_addr() const { + return _ref_addr; +} + +Register ZLoadBarrierStubC2::ref() const { + return _ref; +} + +Register ZLoadBarrierStubC2::tmp() const { + return _tmp; +} + +address ZLoadBarrierStubC2::slow_path() const { + const DecoratorSet decorators = _weak ? ON_WEAK_OOP_REF : ON_STRONG_OOP_REF; + return ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators); } -void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const { - // Change to that barrier may affect a dominated barrier so re-push those - assert(!is_weak(), "sanity"); - Node* val = in(LoadBarrierNode::Oop); +RegMask& ZLoadBarrierStubC2::live() const { + return *barrier_set_state()->live(_node); +} + +Label* ZLoadBarrierStubC2::entry() { + // The _entry will never be bound when in_scratch_emit_size() is true. + // However, we still need to return a label that is not bound now, but + // will eventually be bound. Any label will do, as it will only act as + // a placeholder, so we return the _continuation label. + return Compile::current()->in_scratch_emit_size() ? &_continuation : &_entry; +} + +Label* ZLoadBarrierStubC2::continuation() { + return &_continuation; +} - for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { - Node* u = val->fast_out(i); - if (u != this && u->is_LoadBarrier() && u->in(Oop) == val) { - Node* this_ctrl = in(Control); - Node* other_ctrl = u->in(Control); - if (is_dominator(NULL, false, this_ctrl, other_ctrl)) { - igvn->_worklist.push(u); - } +void* ZBarrierSetC2::create_barrier_state(Arena* comp_arena) const { + return new (comp_arena) ZBarrierSetC2State(comp_arena); +} + +void ZBarrierSetC2::late_barrier_analysis() const { + analyze_dominating_barriers(); + compute_liveness_at_stubs(); +} + +void ZBarrierSetC2::emit_stubs(CodeBuffer& cb) const { + MacroAssembler masm(&cb); + GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs(); + + for (int i = 0; i < stubs->length(); i++) { + // Make sure there is enough space in the code buffer + if (cb.insts()->maybe_expand_to_ensure_remaining(Compile::MAX_inst_size) && cb.blob() == NULL) { + ciEnv::current()->record_failure("CodeCache is full"); + return; } - Node* addr = in(LoadBarrierNode::Address); - for (DUIterator_Fast imax, i = addr->fast_outs(imax); i < imax; i++) { - Node* u = addr->fast_out(i); - if (u != this && u->is_LoadBarrier() && u->in(Similar)->is_top()) { - Node* this_ctrl = in(Control); - Node* other_ctrl = u->in(Control); - if (is_dominator(NULL, false, this_ctrl, other_ctrl)) { - igvn->_worklist.push(u); - } - } - } - } -} - -Node *LoadBarrierNode::Identity(PhaseGVN *phase) { - LoadBarrierNode* dominating_barrier = has_dominating_barrier(NULL, true, false); - if (dominating_barrier != NULL) { - assert(!is_weak(), "Weak barriers cant be eliminated"); - assert(dominating_barrier->in(Oop) == in(Oop), ""); - return dominating_barrier; - } - - return this; -} - -Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (remove_dead_region(phase, can_reshape)) { - return this; 
+ ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); } - Node *val = in(Oop); - Node *mem = in(Memory); - Node *ctrl = in(Control); - - assert(val->Opcode() != Op_LoadN, ""); - assert(val->Opcode() != Op_DecodeN, ""); - - if (mem->is_MergeMem()) { - Node *new_mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw); - set_req(Memory, new_mem); - if (mem->outcnt() == 0 && can_reshape) { - phase->is_IterGVN()->_worklist.push(mem); - } - return this; - } + masm.flush(); +} - LoadBarrierNode *dominating_barrier = NULL; - if (!is_weak()) { - dominating_barrier = has_dominating_barrier(NULL, !can_reshape, !phase->C->major_progress()); - if (dominating_barrier != NULL && dominating_barrier->in(Oop) != in(Oop)) { - assert(in(Address) == dominating_barrier->in(Address), ""); - set_req(Similar, dominating_barrier->proj_out(Oop)); - return this; - } - } - - bool eliminate = can_reshape && (dominating_barrier != NULL || !has_true_uses()); - if (eliminate) { - if (can_reshape) { - PhaseIterGVN* igvn = phase->is_IterGVN(); - Node* out_ctrl = proj_out_or_null(Control); - Node* out_res = proj_out_or_null(Oop); +int ZBarrierSetC2::estimate_stub_size() const { + Compile* const C = Compile::current(); + BufferBlob* const blob = C->scratch_buffer_blob(); + GrowableArray<ZLoadBarrierStubC2*>* const stubs = barrier_set_state()->stubs(); + int size = 0; - if (out_ctrl != NULL) { - igvn->replace_node(out_ctrl, ctrl); - } - - // That transformation may cause the Similar edge on the load barrier to be invalid - fix_similar_in_uses(igvn); - if (out_res != NULL) { - if (dominating_barrier != NULL) { - assert(!is_weak(), "Sanity"); - igvn->replace_node(out_res, dominating_barrier->proj_out(Oop)); - } else { - igvn->replace_node(out_res, val); - } - } - } - return new ConINode(TypeInt::ZERO); + for (int i = 0; i < stubs->length(); i++) { + CodeBuffer cb(blob->content_begin(), (address)C->scratch_locs_memory() - blob->content_begin()); + MacroAssembler masm(&cb); + ZBarrierSet::assembler()->generate_c2_load_barrier_stub(&masm, stubs->at(i)); + size += cb.insts_size(); } - // If the Similar edge is no longer a load barrier, clear it - Node* similar = in(Similar); - if (!similar->is_top() && !(similar->is_Proj() && similar->in(0)->is_LoadBarrier())) { - set_req(Similar, phase->C->top()); - return this; - } - - if (can_reshape && !is_weak()) { - // If this barrier is linked through the Similar edge by a - // dominated barrier and both barriers have the same Oop field, - // the dominated barrier can go away, so push it for reprocessing. - // We also want to avoid a barrier to depend on another dominating - // barrier through its Similar edge that itself depend on another - // barrier through its Similar edge and rather have the first - // depend on the third. 
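estimate_stub_size() above relies on a measure-by-emitting idiom: each stub is assembled into a throwaway scratch buffer purely to learn its encoded length, and the lengths are summed; nothing emitted is kept. The same idiom with the assembler abstracted behind a callback (the signature is illustrative, not HotSpot's):

#include <cstddef>
#include <functional>
#include <vector>

// Each emitter writes one stub into the scratch buffer and returns the
// number of bytes it produced.
using StubEmitter = std::function<size_t(char* buf, size_t cap)>;

static size_t estimate_stub_size(const std::vector<StubEmitter>& stubs,
                                 char* scratch, size_t scratch_cap) {
  size_t total = 0;
  for (size_t i = 0; i < stubs.size(); i++) {
    // Reuse the same scratch space every iteration; only the size
    // survives, mirroring the CodeBuffer-over-scratch-blob loop above.
    total += stubs[i](scratch, scratch_cap);
  }
  return total;
}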
- PhaseIterGVN* igvn = phase->is_IterGVN(); - Node* out_res = proj_out(Oop); - for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) { - Node* u = out_res->fast_out(i); - if (u->is_LoadBarrier() && u->in(Similar) == out_res && - (u->in(Oop) == val || !u->in(Similar)->is_top())) { - assert(!u->as_LoadBarrier()->is_weak(), "Sanity"); - igvn->_worklist.push(u); - } - } - push_dominated_barriers(igvn); - } - - return NULL; -} - -uint LoadBarrierNode::match_edge(uint idx) const { - ShouldNotReachHere(); - return 0; -} - -void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) { - Node* out_res = proj_out_or_null(Oop); - if (out_res == NULL) { - return; - } - - for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) { - Node* u = out_res->fast_out(i); - if (u->is_LoadBarrier() && u->in(Similar) == out_res) { - igvn->replace_input_of(u, Similar, igvn->C->top()); - --i; - --imax; - } - } -} - -bool LoadBarrierNode::has_true_uses() const { - Node* out_res = proj_out_or_null(Oop); - if (out_res != NULL) { - for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) { - Node *u = out_res->fast_out(i); - if (!u->is_LoadBarrier() || u->in(Similar) != out_res) { - return true; - } - } - } - return false; + return size; } static bool barrier_needed(C2Access& access) { @@ -474,1223 +180,252 @@ } Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const { - Node* p = BarrierSetC2::load_at_resolved(access, val_type); - if (!barrier_needed(access)) { - return p; + Node* result = BarrierSetC2::load_at_resolved(access, val_type); + if (barrier_needed(access) && access.raw_access()->is_Mem()) { + if ((access.decorators() & ON_WEAK_OOP_REF) != 0) { + access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierWeak); + } else { + access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierStrong); + } } - bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0; - if (p->isa_Load()) { - load_set_barrier(p->as_Load(), weak); - } - return p; + return result; } Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, Node* new_val, const Type* val_type) const { Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type); - LoadStoreNode* lsn = result->as_LoadStore(); if (barrier_needed(access)) { - lsn->set_has_barrier(); + access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong); } - return lsn; + return result; } Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val, Node* new_val, const Type* value_type) const { Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type); - LoadStoreNode* lsn = result->as_LoadStore(); if (barrier_needed(access)) { - lsn->set_has_barrier(); + access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong); } - return lsn; + return result; } Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const { Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type); - LoadStoreNode* lsn = result->as_LoadStore(); if (barrier_needed(access)) { - lsn->set_has_barrier(); + access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong); } - return lsn; + return result; } -// == Macro Expansion == - -// Optimized, low spill, loadbarrier variant using stub specialized on register used -void 
ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const { - PhaseIterGVN &igvn = phase->igvn(); - float unlikely = PROB_UNLIKELY(0.999); - - Node* in_ctrl = barrier->in(LoadBarrierNode::Control); - Node* in_mem = barrier->in(LoadBarrierNode::Memory); - Node* in_val = barrier->in(LoadBarrierNode::Oop); - Node* in_adr = barrier->in(LoadBarrierNode::Address); - - Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control); - Node* out_res = barrier->proj_out(LoadBarrierNode::Oop); - - assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null"); - - Node* jthread = igvn.transform(new ThreadLocalNode()); - Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset())); - Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, - TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), - MemNode::unordered)); - Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val)); - Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask)); - Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type()))); - Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool(); - IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If(); - Node* then = igvn.transform(new IfTrueNode(iff)); - Node* elsen = igvn.transform(new IfFalseNode(iff)); - - Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val, - (const TypePtr*) in_val->bottom_type(), barrier->is_weak())); - - // Create the final region/phi pair to converge cntl/data paths to downstream code - Node* result_region = igvn.transform(new RegionNode(3)); - result_region->set_req(1, then); - result_region->set_req(2, elsen); - - Node* result_phi = igvn.transform(new PhiNode(result_region, TypeInstPtr::BOTTOM)); - result_phi->set_req(1, new_loadp); - result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop)); - - igvn.replace_node(out_ctrl, result_region); - igvn.replace_node(out_res, result_phi); - - assert(barrier->outcnt() == 0,"LoadBarrier macro node has non-null outputs after expansion!"); - - igvn.remove_dead_node(barrier); - igvn.remove_dead_node(out_ctrl); - igvn.remove_dead_node(out_res); - - assert(is_gc_barrier_node(result_phi), "sanity"); - assert(step_over_gc_barrier(result_phi) == in_val, "sanity"); - - phase->C->print_method(PHASE_BARRIER_EXPANSION, 4, barrier->_idx); +bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, + bool is_clone, ArrayCopyPhase phase) const { + return type == T_OBJECT || type == T_ARRAY; } -bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const { - ZBarrierSetC2State* s = state(); - if (s->load_barrier_count() > 0) { - PhaseMacroExpand macro(igvn); +// == Dominating barrier elision == - int skipped = 0; - while (s->load_barrier_count() > skipped) { - int load_barrier_count = s->load_barrier_count(); - LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped); - if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) { - // Node is unreachable, so don't try to expand it - s->remove_load_barrier_node(n); - continue; - } - if (!n->can_be_eliminated()) { - skipped++; - continue; - } - expand_loadbarrier_node(¯o, n); - assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list"); - if (C->failing()) { - return true; - } - } - while (s->load_barrier_count() > 0) { - int 
load_barrier_count = s->load_barrier_count(); - LoadBarrierNode* n = s->load_barrier_node(load_barrier_count - 1); - assert(!(igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())), "should have been processed already"); - assert(!n->can_be_eliminated(), "should have been processed already"); - expand_loadbarrier_node(¯o, n); - assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list"); - if (C->failing()) { - return true; - } - } - igvn.set_delay_transform(false); - igvn.optimize(); - if (C->failing()) { +static bool block_has_safepoint(const Block* block, uint from, uint to) { + for (uint i = from; i < to; i++) { + if (block->get_node(i)->is_MachSafePoint()) { + // Safepoint found return true; } } + // Safepoint not found return false; } -Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const { - Node* node = c; +static bool block_has_safepoint(const Block* block) { + return block_has_safepoint(block, 0, block->number_of_nodes()); +} - // 1. This step follows potential oop projections of a load barrier before expansion - if (node->is_Proj()) { - node = node->in(0); +static uint block_index(const Block* block, const Node* node) { + for (uint j = 0; j < block->number_of_nodes(); ++j) { + if (block->get_node(j) == node) { + return j; + } } + ShouldNotReachHere(); + return 0; +} + +void ZBarrierSetC2::analyze_dominating_barriers() const { + ResourceMark rm; + Compile* const C = Compile::current(); + PhaseCFG* const cfg = C->cfg(); + Block_List worklist; + Node_List mem_ops; + Node_List barrier_loads; - // 2. This step checks for unexpanded load barriers - if (node->is_LoadBarrier()) { - return node->in(LoadBarrierNode::Oop); - } + // Step 1 - Find accesses, and track them in lists + for (uint i = 0; i < cfg->number_of_blocks(); ++i) { + const Block* const block = cfg->get_block(i); + for (uint j = 0; j < block->number_of_nodes(); ++j) { + const Node* const node = block->get_node(j); + if (!node->is_Mach()) { + continue; + } - // 3. This step checks for the phi corresponding to an optimized load barrier expansion - if (node->is_Phi()) { - PhiNode* phi = node->as_Phi(); - Node* n = phi->in(1); - if (n != NULL && n->is_LoadBarrierSlowReg()) { - assert(c == node, "projections from step 1 should only be seen before macro expansion"); - return phi->in(2); + MachNode* const mach = node->as_Mach(); + switch (mach->ideal_Opcode()) { + case Op_LoadP: + case Op_CompareAndExchangeP: + case Op_CompareAndSwapP: + case Op_GetAndSetP: + if (mach->barrier_data() == ZLoadBarrierStrong) { + barrier_loads.push(mach); + } + case Op_StoreP: + mem_ops.push(mach); + break; + + default: + break; + } } } - return c; -} - -Node* ZBarrierSetC2::step_over_gc_barrier_ctrl(Node* c) const { - Node* node = c; - - // 1. This step follows potential ctrl projections of a load barrier before expansion - if (node->is_Proj()) { - node = node->in(0); - } - - // 2. 
This step checks for unexpanded load barriers
-  if (node->is_LoadBarrier()) {
-    return node->in(LoadBarrierNode::Control);
-  }
-
-  return c;
-}
-
-bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
-  return is_reference_type(type);
-}
-
-bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
-  switch (opcode) {
-    case Op_LoadBarrier:
-      assert(0, "There should be no load barriers left");
-    case Op_ZGetAndSetP:
-    case Op_ZCompareAndExchangeP:
-    case Op_ZCompareAndSwapP:
-    case Op_ZWeakCompareAndSwapP:
-#ifdef ASSERT
-      if (VerifyOptoOopOffsets) {
-        MemNode *mem = n->as_Mem();
-        // Check to see if address types have grounded out somehow.
-        const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
-        ciInstanceKlass *k = tp->klass()->as_instance_klass();
-        bool oop_offset_is_sane = k->contains_field_offset(tp->offset());
-        assert(!tp || oop_offset_is_sane, "");
-      }
-#endif
-      return true;
-    default:
-      return false;
-  }
-}
+  // Step 2 - Find dominating accesses for each load
+  for (uint i = 0; i < barrier_loads.size(); i++) {
+    MachNode* const load = barrier_loads.at(i)->as_Mach();
+    const TypePtr* load_adr_type = NULL;
+    intptr_t load_offset = 0;
+    const Node* const load_obj = load->get_base_and_disp(load_offset, load_adr_type);
+    Block* const load_block = cfg->get_block_for_node(load);
+    const uint load_index = block_index(load_block, load);
-bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
-  switch(opcode) {
-    case Op_CallLeaf:
-      if (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
-          n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()) {
-        mem_op = true;
-        mem_addr_idx = TypeFunc::Parms + 1;
-        return true;
-      }
-      return false;
-    default:
-      return false;
-  }
-}
+    for (uint j = 0; j < mem_ops.size(); j++) {
+      MachNode* mem = mem_ops.at(j)->as_Mach();
+      const TypePtr* mem_adr_type = NULL;
+      intptr_t mem_offset = 0;
+      const Node* mem_obj = mem->get_base_and_disp(mem_offset, mem_adr_type);
+      Block* mem_block = cfg->get_block_for_node(mem);
+      uint mem_index = block_index(mem_block, mem);
-bool ZBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
-  switch(opcode) {
-    case Op_ZCompareAndExchangeP:
-    case Op_ZCompareAndSwapP:
-    case Op_ZWeakCompareAndSwapP: {
-      Node *mem = n->in(MemNode::Address);
-      Node *keepalive = n->in(5);
-      Node *pair1 = new BinaryNode(mem, keepalive);
-
-      Node *newval = n->in(MemNode::ValueIn);
-      Node *oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
-      Node *pair2 = new BinaryNode(oldval, newval);
+      if (load_obj == NodeSentinel || mem_obj == NodeSentinel ||
+          load_obj == NULL || mem_obj == NULL ||
+          load_offset < 0 || mem_offset < 0) {
+        continue;
+      }
-      n->set_req(MemNode::Address, pair1);
-      n->set_req(MemNode::ValueIn, pair2);
-      n->del_req(5);
-      n->del_req(LoadStoreConditionalNode::ExpectedIn);
-      return true;
-    }
-    case Op_ZGetAndSetP: {
-      Node *keepalive = n->in(4);
-      Node *newval = n->in(MemNode::ValueIn);
-      Node *pair = new BinaryNode(newval, keepalive);
-      n->set_req(MemNode::ValueIn, pair);
-      n->del_req(4);
-      return true;
-    }
+      if (mem_obj != load_obj || mem_offset != load_offset) {
+        // Not the same addresses, not a candidate
+        continue;
+      }
-    default:
-      return false;
-  }
-}
-
-// == Verification ==
-
-#ifdef ASSERT
-
-static void verify_slippery_safepoints_internal(Node* ctrl) {
-  // Given a CFG
node, make sure it does not contain both safepoints and loads - // that have expanded barriers. - bool found_safepoint = false; - bool found_load = false; + if (mem_obj != load_obj || mem_offset != load_offset) { + // Not the same addresses, not a candidate + continue; + } - for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) { - Node* node = ctrl->fast_out(i); - if (node->in(0) != ctrl) { - // Skip outgoing precedence edges from ctrl. - continue; - } - if (node->is_SafePoint()) { - found_safepoint = true; - } - if (node->is_Load() && load_require_barrier(node->as_Load()) && - load_has_expanded_barrier(node->as_Load())) { - found_load = true; - } - } - assert(!found_safepoint || !found_load, "found load and safepoint in same block"); -} - -static void verify_slippery_safepoints(Compile* C) { - ResourceArea *area = Thread::current()->resource_area(); - Unique_Node_List visited(area); - Unique_Node_List checked(area); - - // Recursively walk the graph. - visited.push(C->root()); - while (visited.size() > 0) { - Node* node = visited.pop(); - - Node* ctrl = node; - if (!node->is_CFG()) { - ctrl = node->in(0); - } - - if (ctrl != NULL && !checked.member(ctrl)) { - // For each block found in the graph, verify that it does not - // contain both a safepoint and a load requiring barriers. - verify_slippery_safepoints_internal(ctrl); - - checked.push(ctrl); - } - - checked.push(node); - - for (DUIterator_Fast imax, i = node->fast_outs(imax); i < imax; i++) { - Node* use = node->fast_out(i); - if (checked.member(use)) continue; - if (visited.member(use)) continue; - visited.push(use); - } - } -} + if (load_block == mem_block) { + // Earlier accesses in the same block + if (mem_index < load_index && !block_has_safepoint(mem_block, mem_index + 1, load_index)) { + load->set_barrier_data(ZLoadBarrierElided); + } + } else if (mem_block->dominates(load_block)) { + // Dominating block? Look around for safepoints + ResourceMark rm; + Block_List stack; + VectorSet visited(Thread::current()->resource_area()); + stack.push(load_block); + bool safepoint_found = block_has_safepoint(load_block); + while (!safepoint_found && stack.size() > 0) { + Block* block = stack.pop(); + if (visited.test_set(block->_pre_order)) { + continue; + } + if (block_has_safepoint(block)) { + safepoint_found = true; + break; + } + if (block == mem_block) { + continue; + } -void ZBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const { - switch(phase) { - case BarrierSetC2::BeforeOptimize: - case BarrierSetC2::BeforeLateInsertion: - assert(state()->load_barrier_count() == 0, "No barriers inserted yet"); - break; - case BarrierSetC2::BeforeMacroExpand: - // Barrier placement should be set by now. - verify_gc_barriers(false /*post_parse*/); - break; - case BarrierSetC2::BeforeCodeGen: - // Barriers has been fully expanded. - assert(state()->load_barrier_count() == 0, "No more macro barriers"); - verify_slippery_safepoints(compile); - break; - default: - assert(0, "Phase without verification"); - } -} - -// post_parse implies that there might be load barriers without uses after parsing -// That only applies when adding barriers at parse time. 
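The same-block case of the elision analysis above drops a load's barrier when a dominating access to the same (base, offset) address precedes it with no safepoint in between: the earlier access already guarantees a healed reference, and only a safepoint could let the GC invalidate it again. A standalone model of that test, under the simplifying assumption that a block is just a vector of accesses (Op is illustrative, not a HotSpot type):

#include <vector>
#include <iostream>

struct Op {
  int  base;      // stand-in for the base node identity
  long offset;    // displacement from the base
  bool safepoint; // true if this op can trigger a GC safepoint
};

// A load at load_index needs no barrier if an earlier access in the same
// block touched the same (base, offset) and no safepoint occurs in between.
static bool can_elide(const std::vector<Op>& block, size_t mem_index, size_t load_index) {
  if (mem_index >= load_index) return false;
  if (block[mem_index].base != block[load_index].base ||
      block[mem_index].offset != block[load_index].offset) return false;
  for (size_t i = mem_index + 1; i < load_index; i++) {
    if (block[i].safepoint) return false; // GC could run here
  }
  return true;
}

int main() {
  std::vector<Op> block = {
    {1, 16, false}, // store to obj.f  (dominating access)
    {2,  0, false}, // unrelated op, no safepoint
    {1, 16, false}, // load of obj.f   -> barrier can be elided
  };
  std::cout << can_elide(block, 0, 2) << "\n"; // prints 1
}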
-void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const { - ZBarrierSetC2State* s = state(); - Compile* C = Compile::current(); - ResourceMark rm; - VectorSet visited(Thread::current()->resource_area()); - - for (int i = 0; i < s->load_barrier_count(); i++) { - LoadBarrierNode* n = s->load_barrier_node(i); - - // The dominating barrier on the same address if it exists and - // this barrier must not be applied on the value from the same - // load otherwise the value is not reloaded before it's used the - // second time. - assert(n->in(LoadBarrierNode::Similar)->is_top() || - (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() && - n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Address) == n->in(LoadBarrierNode::Address) && - n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Oop) != n->in(LoadBarrierNode::Oop)), - "broken similar edge"); - - assert(n->as_LoadBarrier()->has_true_uses(), - "found unneeded load barrier"); - - // Several load barrier nodes chained through their Similar edge - // break the code that remove the barriers in final graph reshape. - assert(n->in(LoadBarrierNode::Similar)->is_top() || - (n->in(LoadBarrierNode::Similar)->in(0)->is_LoadBarrier() && - n->in(LoadBarrierNode::Similar)->in(0)->in(LoadBarrierNode::Similar)->is_top()), - "chain of Similar load barriers"); - - if (!n->in(LoadBarrierNode::Similar)->is_top()) { - ResourceMark rm; - Unique_Node_List wq; - Node* other = n->in(LoadBarrierNode::Similar)->in(0); - wq.push(n); - for (uint next = 0; next < wq.size(); ++next) { - Node *nn = wq.at(next); - assert(nn->is_CFG(), ""); - assert(!nn->is_SafePoint(), ""); - - if (nn == other) { - continue; + // Push predecessor blocks + for (uint p = 1; p < block->num_preds(); ++p) { + Block* pred = cfg->get_block_for_node(block->pred(p)); + stack.push(pred); + } } - if (nn->is_Region()) { - for (uint i = 1; i < nn->req(); i++) { - Node* m = nn->in(i); - if (m != NULL) { - wq.push(m); - } - } - } else { - Node* m = nn->in(0); - if (m != NULL) { - wq.push(m); - } + if (!safepoint_found) { + load->set_barrier_data(ZLoadBarrierElided); } } } } } -#endif // end verification code - -// If a call is the control, we actually want its control projection -static Node* normalize_ctrl(Node* node) { - if (node->is_Call()) { - node = node->as_Call()->proj_out(TypeFunc::Control); - } - return node; -} - -static Node* get_ctrl_normalized(PhaseIdealLoop *phase, Node* node) { - return normalize_ctrl(phase->get_ctrl(node)); -} - -static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl); - -// This code is cloning all uses of a load that is between a call and the catch blocks, -// to each use. - -static bool fixup_uses_in_catch(PhaseIdealLoop *phase, Node *start_ctrl, Node *node) { - - if (!phase->has_ctrl(node)) { - // This node is floating - doesn't need to be cloned. - assert(node != start_ctrl, "check"); - return false; - } - - Node* ctrl = get_ctrl_normalized(phase, node); - if (ctrl != start_ctrl) { - // We are in a successor block - the node is ok. 
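For the cross-block case, the analysis walks from the load's block up through every predecessor path and gives up as soon as any path contains a safepoint before reaching the dominating block. A standalone sketch of that search; Block here is a toy struct with illustrative fields, not HotSpot's Block:

#include <vector>
#include <iostream>

struct Block {
  std::vector<int> preds;
  bool has_safepoint;
};

// Returns true if some path from load_block back to mem_block can hit a
// safepoint, in which case the barrier must be kept.
static bool safepoint_between(const std::vector<Block>& cfg, int mem_block, int load_block) {
  std::vector<bool> visited(cfg.size(), false);
  std::vector<int> stack = {load_block};
  while (!stack.empty()) {
    int b = stack.back();
    stack.pop_back();
    if (visited[b]) continue;
    visited[b] = true;
    if (cfg[b].has_safepoint) return true; // this path may safepoint
    if (b == mem_block) continue;          // dominated region ends here
    for (int p : cfg[b].preds) stack.push_back(p);
  }
  return false;
}

int main() {
  // Linear CFG 0 -> 1 -> 2, where block 1 contains a safepoint.
  std::vector<Block> cfg = {{{}, false}, {{0}, true}, {{1}, false}};
  std::cout << safepoint_between(cfg, 0, 2) << "\n"; // prints 1: no elision
}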
- return false; // Unwind - } - - // Process successor nodes - int outcnt = node->outcnt(); - for (int i = 0; i < outcnt; i++) { - Node* n = node->raw_out(0); - assert(!n->is_LoadBarrier(), "Sanity"); - // Calling recursively, visiting leafs first - fixup_uses_in_catch(phase, start_ctrl, n); - } - - // Now all successors are outside - // - Clone this node to both successors - assert(!node->is_Store(), "Stores not expected here"); - - // In some very rare cases a load that doesn't need a barrier will end up here - // Treat it as a LoadP and the insertion of phis will be done correctly. - if (node->is_Load()) { - call_catch_cleanup_one(phase, node->as_Load(), phase->get_ctrl(node)); - } else { - for (DUIterator_Fast jmax, i = node->fast_outs(jmax); i < jmax; i++) { - Node* use = node->fast_out(i); - Node* clone = node->clone(); - assert(clone->outcnt() == 0, ""); +// == Reduced spilling optimization == - assert(use->find_edge(node) != -1, "check"); - phase->igvn().rehash_node_delayed(use); - use->replace_edge(node, clone); - - Node* new_ctrl; - if (use->is_block_start()) { - new_ctrl = use; - } else if (use->is_CFG()) { - new_ctrl = use->in(0); - assert (new_ctrl != NULL, ""); - } else { - new_ctrl = get_ctrl_normalized(phase, use); - } - - phase->set_ctrl(clone, new_ctrl); - - if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr(" Clone op %i as %i to control %i", node->_idx, clone->_idx, new_ctrl->_idx); - phase->igvn().register_new_node_with_optimizer(clone); - --i, --jmax; - } - assert(node->outcnt() == 0, "must be empty now"); - - // Node node is dead. - phase->igvn().remove_dead_node(node); - } - return true; // unwind - return if a use was processed -} +void ZBarrierSetC2::compute_liveness_at_stubs() const { + ResourceMark rm; + Compile* const C = Compile::current(); + Arena* const A = Thread::current()->resource_area(); + PhaseCFG* const cfg = C->cfg(); + PhaseRegAlloc* const regalloc = C->regalloc(); + RegMask* const live = NEW_ARENA_ARRAY(A, RegMask, cfg->number_of_blocks() * sizeof(RegMask)); + ZBarrierSetAssembler* const bs = ZBarrierSet::assembler(); + Block_List worklist; -// Clone a load to a specific catch_proj -static Node* clone_load_to_catchproj(PhaseIdealLoop* phase, Node* load, Node* catch_proj) { - Node* cloned_load = load->clone(); - cloned_load->set_req(0, catch_proj); // set explicit control - phase->set_ctrl(cloned_load, catch_proj); // update - if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr(" Clone LOAD %i as %i to control %i", load->_idx, cloned_load->_idx, catch_proj->_idx); - phase->igvn().register_new_node_with_optimizer(cloned_load); - return cloned_load; -} - -static Node* get_dominating_region(PhaseIdealLoop* phase, Node* node, Node* stop) { - Node* region = node; - while (!region->isa_Region()) { - Node *up = phase->idom(region); - assert(up != region, "Must not loop"); - assert(up != stop, "Must not find original control"); - region = up; - } - return region; -} - -// Clone this load to each catch block -static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) { - bool trace = phase->C->directive()->ZTraceLoadBarriersOption; - phase->igvn().set_delay_transform(true); - - // Verify pre conditions - assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj"); - assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch"); - - if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) { - if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx); - 
return; + for (uint i = 0; i < cfg->number_of_blocks(); ++i) { + new ((void*)(live + i)) RegMask(); + worklist.push(cfg->get_block(i)); } - // Process the loads successor nodes - if any is between - // the call and the catch blocks, they need to be cloned to. - // This is done recursively - for (uint i = 0; i < load->outcnt();) { - Node *n = load->raw_out(i); - assert(!n->is_LoadBarrier(), "Sanity"); - if (!fixup_uses_in_catch(phase, ctrl, n)) { - // if no successor was cloned, progress to next out. - i++; - } - } - - // Now all the loads uses has been cloned down - // Only thing left is to clone the loads, but they must end up - // first in the catch blocks. - - // We clone the loads oo the catch blocks only when needed. - // An array is used to map the catch blocks to each lazily cloned load. - // In that way no extra unnecessary loads are cloned. - - // Any use dominated by original block must have an phi and a region added - - Node* catch_node = ctrl->raw_out(0); - int number_of_catch_projs = catch_node->outcnt(); - Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs); - Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs); - - // The phi_map is used to keep track of where phis have already been inserted - int phi_map_len = phase->C->unique(); - Node** phi_map = NEW_RESOURCE_ARRAY(Node*, phi_map_len); - Copy::zero_to_bytes(phi_map, sizeof(Node*) * phi_map_len); + while (worklist.size() > 0) { + const Block* const block = worklist.pop(); + RegMask& old_live = live[block->_pre_order]; + RegMask new_live; - for (unsigned int i = 0; i < load->outcnt(); i++) { - Node* load_use_control = NULL; - Node* load_use = load->raw_out(i); - - if (phase->has_ctrl(load_use)) { - load_use_control = get_ctrl_normalized(phase, load_use); - assert(load_use_control != ctrl, "sanity"); - } else { - load_use_control = load_use->in(0); - } - assert(load_use_control != NULL, "sanity"); - if (trace) tty->print_cr(" Handling use: %i, with control: %i", load_use->_idx, load_use_control->_idx); - - // Some times the loads use is a phi. For them we need to determine from which catch block - // the use is defined. - bool load_use_is_phi = false; - unsigned int load_use_phi_index = 0; - Node* phi_ctrl = NULL; - if (load_use->is_Phi()) { - // Find phi input that matches load - for (unsigned int u = 1; u < load_use->req(); u++) { - if (load_use->in(u) == load) { - load_use_is_phi = true; - load_use_phi_index = u; - assert(load_use->in(0)->is_Region(), "Region or broken"); - phi_ctrl = load_use->in(0)->in(u); - assert(phi_ctrl->is_CFG(), "check"); - assert(phi_ctrl != load, "check"); - break; - } - } - assert(load_use_is_phi, "must find"); - assert(load_use_phi_index > 0, "sanity"); + // Initialize to union of successors + for (uint i = 0; i < block->_num_succs; i++) { + const uint succ_id = block->_succs[i]->_pre_order; + new_live.OR(live[succ_id]); } - // For each load use, see which catch projs dominates, create load clone lazily and reconnect - bool found_dominating_catchproj = false; - for (int c = 0; c < number_of_catch_projs; c++) { - Node* catchproj = catch_node->raw_out(c); - assert(catchproj != NULL && catchproj->isa_CatchProj(), "Sanity"); - - if (!phase->is_dominator(catchproj, load_use_control)) { - if (load_use_is_phi && phase->is_dominator(catchproj, phi_ctrl)) { - // The loads use is local to the catchproj. - // fall out and replace load with catch-local load clone. 
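compute_liveness_at_stubs() above is a classic backward liveness pass: each block's live set starts as the union of its successors' live-in, and the block is then walked bottom-up, clearing definition bits and setting use bits. A self-contained model of that per-block step, with std::bitset standing in for RegMask and Insn as an illustrative instruction record:

#include <bitset>
#include <vector>
#include <iostream>

using RegMask32 = std::bitset<32>;

struct Insn {
  int def;               // register defined, -1 if none
  std::vector<int> uses; // registers read
};

static RegMask32 live_in_of_block(const std::vector<Insn>& block,
                                  const std::vector<RegMask32>& succ_live_in) {
  RegMask32 live;
  for (const RegMask32& s : succ_live_in) {
    live |= s;                               // initialize to union of successors
  }
  for (auto it = block.rbegin(); it != block.rend(); ++it) {
    if (it->def >= 0) live.reset(it->def);   // remove def bits
    for (int u : it->uses) live.set(u);      // add use bits
  }
  return live;
}

int main() {
  // r1 = r0; r2 = r1 + r1  -- with r2 live out of the block
  std::vector<Insn> block = { {1, {0}}, {2, {1, 1}} };
  RegMask32 out;
  out.set(2);
  std::cout << live_in_of_block(block, {out}) << "\n"; // only r0 live on entry
}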
- } else { - continue; - } - } - assert(!found_dominating_catchproj, "Max one should match"); - - // Clone loads to catch projs - Node* load_clone = proj_to_load_mapping[c]; - if (load_clone == NULL) { - load_clone = clone_load_to_catchproj(phase, load, catchproj); - proj_to_load_mapping[c] = load_clone; - } - phase->igvn().rehash_node_delayed(load_use); + // Walk block backwards, computing liveness + for (int i = block->number_of_nodes() - 1; i >= 0; --i) { + const Node* const node = block->get_node(i); - if (load_use_is_phi) { - // phis are special - the load is defined from a specific control flow - load_use->set_req(load_use_phi_index, load_clone); - } else { - // Multipe edges can be replaced at once - on calls for example - load_use->replace_edge(load, load_clone); + // Remove def bits + const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node)); + const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node)); + if (first != OptoReg::Bad) { + new_live.Remove(first); } - --i; // more than one edge can have been removed, but the next is in later iterations - - // We could break the for-loop after finding a dominating match. - // But keep iterating to catch any bad idom early. - found_dominating_catchproj = true; - } + if (second != OptoReg::Bad) { + new_live.Remove(second); + } - // We found no single catchproj that dominated the use - The use is at a point after - // where control flow from multiple catch projs have merged. We will have to create - // phi nodes before the use and tie the output from the cloned loads together. It - // can be a single phi or a number of chained phis, depending on control flow - if (!found_dominating_catchproj) { - - // Use phi-control if use is a phi - if (load_use_is_phi) { - load_use_control = phi_ctrl; - } - assert(phase->is_dominator(ctrl, load_use_control), "Common use but no dominator"); - - // Clone a load on all paths - for (int c = 0; c < number_of_catch_projs; c++) { - Node* catchproj = catch_node->raw_out(c); - Node* load_clone = proj_to_load_mapping[c]; - if (load_clone == NULL) { - load_clone = clone_load_to_catchproj(phase, load, catchproj); - proj_to_load_mapping[c] = load_clone; + // Add use bits + for (uint j = 1; j < node->req(); ++j) { + const Node* const use = node->in(j); + const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use)); + const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use)); + if (first != OptoReg::Bad) { + new_live.Insert(first); + } + if (second != OptoReg::Bad) { + new_live.Insert(second); } } - // Move up dominator tree from use until dom front is reached - Node* next_region = get_dominating_region(phase, load_use_control, ctrl); - while (phase->idom(next_region) != catch_node) { - next_region = phase->idom(next_region); - if (trace) tty->print_cr("Moving up idom to region ctrl %i", next_region->_idx); - } - assert(phase->is_dominator(catch_node, next_region), "Sanity"); - - // Create or reuse phi node that collect all cloned loads and feed it to the use. 
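Further down, the function propagates any newly discovered live-in bits to the block's predecessors (new_live.SUBTRACT(old_live); if anything remains, OR it in and re-queue the predecessors) until a fixed point is reached. A standalone sketch of that worklist iteration, simplified to ignore kill sets; BlockInfo and the CFG layout are illustrative:

#include <bitset>
#include <vector>

using RegMask32 = std::bitset<32>;

struct BlockInfo {
  std::vector<int> preds;
  std::vector<int> succs;
  RegMask32 gen; // registers this block makes live (uses before defs)
};

static std::vector<RegMask32> solve_liveness(const std::vector<BlockInfo>& cfg) {
  std::vector<RegMask32> live(cfg.size());
  std::vector<int> worklist;
  for (int i = 0; i < (int)cfg.size(); i++) worklist.push_back(i);
  while (!worklist.empty()) {
    int b = worklist.back();
    worklist.pop_back();
    RegMask32 new_live = cfg[b].gen;
    for (int s : cfg[b].succs) new_live |= live[s];   // union of successors
    RegMask32 grown = new_live & ~live[b];            // new_live.SUBTRACT(old_live)
    if (grown.any()) {
      live[b] |= grown;                               // old_live.OR(new_live)
      for (int p : cfg[b].preds) worklist.push_back(p); // re-queue predecessors
    }
  }
  return live;
}

int main() {
  // 0 -> 1 -> 2; block 2 uses r5, so r5 becomes live all the way back to 0.
  std::vector<BlockInfo> cfg(3);
  cfg[0].succs = {1};
  cfg[1].preds = {0}; cfg[1].succs = {2};
  cfg[2].preds = {1}; cfg[2].gen.set(5);
  auto live = solve_liveness(cfg);
  return live[0].test(5) ? 0 : 1;
}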
- Node* test_phi = phi_map[next_region->_idx]; - if ((test_phi != NULL) && test_phi->is_Phi()) { - // Reuse an already created phi - if (trace) tty->print_cr(" Using cached Phi %i on load_use %i", test_phi->_idx, load_use->_idx); - phase->igvn().rehash_node_delayed(load_use); - load_use->replace_edge(load, test_phi); - // Now this use is done - } else { - // Otherwise we need to create one or more phis - PhiNode* next_phi = new PhiNode(next_region, load->type()); - phi_map[next_region->_idx] = next_phi; // cache new phi - phase->igvn().rehash_node_delayed(load_use); - load_use->replace_edge(load, next_phi); - - int dominators_of_region = 0; - do { - // New phi, connect to region and add all loads as in. - Node* region = next_region; - assert(region->isa_Region() && region->req() > 2, "Catch dead region nodes"); - PhiNode* new_phi = next_phi; - - if (trace) tty->print_cr("Created Phi %i on load %i with control %i", new_phi->_idx, load->_idx, region->_idx); - - // Need to add all cloned loads to the phi, taking care that the right path is matched - dominators_of_region = 0; // reset for new region - for (unsigned int reg_i = 1; reg_i < region->req(); reg_i++) { - Node* region_pred = region->in(reg_i); - assert(region_pred->is_CFG(), "check"); - bool pred_has_dominator = false; - for (int c = 0; c < number_of_catch_projs; c++) { - Node* catchproj = catch_node->raw_out(c); - if (phase->is_dominator(catchproj, region_pred)) { - new_phi->set_req(reg_i, proj_to_load_mapping[c]); - if (trace) tty->print_cr(" - Phi in(%i) set to load %i", reg_i, proj_to_load_mapping[c]->_idx); - pred_has_dominator = true; - dominators_of_region++; - break; - } - } - - // Sometimes we need to chain several phis. - if (!pred_has_dominator) { - assert(dominators_of_region <= 1, "More than one region can't require extra phi"); - if (trace) tty->print_cr(" - Region %i pred %i not dominated by catch proj", region->_idx, region_pred->_idx); - // Continue search on on this region_pred - // - walk up to next region - // - create a new phi and connect to first new_phi - next_region = get_dominating_region(phase, region_pred, ctrl); - - // Lookup if there already is a phi, create a new otherwise - Node* test_phi = phi_map[next_region->_idx]; - if ((test_phi != NULL) && test_phi->is_Phi()) { - next_phi = test_phi->isa_Phi(); - dominators_of_region++; // record that a match was found and that we are done - if (trace) tty->print_cr(" Using cached phi Phi %i on control %i", next_phi->_idx, next_region->_idx); - } else { - next_phi = new PhiNode(next_region, load->type()); - phi_map[next_region->_idx] = next_phi; - } - new_phi->set_req(reg_i, next_phi); - } - } - - new_phi->set_req(0, region); - phase->igvn().register_new_node_with_optimizer(new_phi); - phase->set_ctrl(new_phi, region); - - assert(dominators_of_region != 0, "Must have found one this iteration"); - } while (dominators_of_region == 1); - } - --i; - } - } // end of loop over uses - - assert(load->outcnt() == 0, "All uses should be handled"); - phase->igvn().remove_dead_node(load); - phase->C->print_method(PHASE_CALL_CATCH_CLEANUP, 4, load->_idx); - - // Now we should be home - phase->igvn().set_delay_transform(false); -} - -// Sort out the loads that are between a call ant its catch blocks -static void process_catch_cleanup_candidate(PhaseIdealLoop* phase, LoadNode* load, bool verify) { - bool trace = phase->C->directive()->ZTraceLoadBarriersOption; - - Node* ctrl = get_ctrl_normalized(phase, load); - if (!ctrl->is_Proj() || (ctrl->in(0) == NULL) || 
!ctrl->in(0)->isa_Call()) { - return; - } - - Node* catch_node = ctrl->isa_Proj()->raw_out(0); - if (catch_node->is_Catch()) { - if (catch_node->outcnt() > 1) { - assert(!verify, "All loads should already have been moved"); - call_catch_cleanup_one(phase, load, ctrl); - } else { - if (trace) tty->print_cr("Call catch cleanup with only one catch: load %i ", load->_idx); - } - } -} - -void ZBarrierSetC2::barrier_insertion_phase(Compile* C, PhaseIterGVN& igvn) const { - PhaseIdealLoop::optimize(igvn, LoopOptsZBarrierInsertion); - if (C->failing()) return; -} - -bool ZBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { - - if (mode == LoopOptsZBarrierInsertion) { - // First make sure all loads between call and catch are moved to the catch block - clean_catch_blocks(phase); - DEBUG_ONLY(clean_catch_blocks(phase, true /* verify */);) - - // Then expand barriers on all loads - insert_load_barriers(phase); - - // Handle all Unsafe that need barriers. - insert_barriers_on_unsafe(phase); - - phase->C->clear_major_progress(); - return true; - } else { - return false; - } -} - -static bool can_simplify_cas(LoadStoreNode* node) { - if (node->isa_LoadStoreConditional()) { - Node *expected_in = node->as_LoadStoreConditional()->in(LoadStoreConditionalNode::ExpectedIn); - return (expected_in->get_ptr_type() == TypePtr::NULL_PTR); - } else { - return false; - } -} - -static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* old_node) { - - Compile *C = phase->C; - PhaseIterGVN &igvn = phase->igvn(); - LoadStoreNode* zclone = NULL; - - Node *in_ctrl = old_node->in(MemNode::Control); - Node *in_mem = old_node->in(MemNode::Memory); - Node *in_adr = old_node->in(MemNode::Address); - Node *in_val = old_node->in(MemNode::ValueIn); - const TypePtr *adr_type = old_node->adr_type(); - const TypePtr* load_type = TypeOopPtr::BOTTOM; // The type for the load we are adding - - switch (old_node->Opcode()) { - case Op_CompareAndExchangeP: { - zclone = new ZCompareAndExchangePNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn), - adr_type, old_node->get_ptr_type(), ((CompareAndExchangeNode*)old_node)->order()); - load_type = old_node->bottom_type()->is_ptr(); - break; - } - case Op_WeakCompareAndSwapP: { - if (can_simplify_cas(old_node)) { - break; - } - zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn), - ((CompareAndSwapNode*)old_node)->order()); - adr_type = TypePtr::BOTTOM; - break; - } - case Op_CompareAndSwapP: { - if (can_simplify_cas(old_node)) { - break; - } - zclone = new ZCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn), - ((CompareAndSwapNode*)old_node)->order()); - adr_type = TypePtr::BOTTOM; - break; - } - case Op_GetAndSetP: { - zclone = new ZGetAndSetPNode(in_ctrl, in_mem, in_adr, in_val, old_node->adr_type(), old_node->get_ptr_type()); - load_type = old_node->bottom_type()->is_ptr(); - break; - } - } - if (zclone != NULL) { - igvn.register_new_node_with_optimizer(zclone, old_node); - - // Make load - LoadPNode *load = new LoadPNode(NULL, in_mem, in_adr, adr_type, load_type, MemNode::unordered, - LoadNode::DependsOnlyOnTest); - load_set_expanded_barrier(load); - igvn.register_new_node_with_optimizer(load); - igvn.replace_node(old_node, zclone); - - Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, false /* weak */); - Node 
*barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop); - Node *barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control); - - igvn.register_new_node_with_optimizer(barrier); - igvn.register_new_node_with_optimizer(barrier_val); - igvn.register_new_node_with_optimizer(barrier_ctrl); - - // loop over all of in_ctrl usages and move to barrier_ctrl - for (DUIterator_Last imin, i = in_ctrl->last_outs(imin); i >= imin; --i) { - Node *use = in_ctrl->last_out(i); - uint l; - for (l = 0; use->in(l) != in_ctrl; l++) {} - igvn.replace_input_of(use, l, barrier_ctrl); - } - - load->set_req(MemNode::Control, in_ctrl); - barrier->set_req(LoadBarrierNode::Control, in_ctrl); - zclone->add_req(barrier_val); // add req as keep alive. - - C->print_method(PHASE_ADD_UNSAFE_BARRIER, 4, zclone->_idx); - } -} - -void ZBarrierSetC2::insert_barriers_on_unsafe(PhaseIdealLoop* phase) const { - Compile *C = phase->C; - PhaseIterGVN &igvn = phase->igvn(); - uint new_ids = C->unique(); - VectorSet visited(Thread::current()->resource_area()); - GrowableArray nodeStack(Thread::current()->resource_area(), 0, 0, NULL); - nodeStack.push(C->root()); - visited.test_set(C->root()->_idx); - - // Traverse all nodes, visit all unsafe ops that require a barrier - while (nodeStack.length() > 0) { - Node *n = nodeStack.pop(); - - bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup - if (is_old_node) { - if (n->is_LoadStore()) { - LoadStoreNode* lsn = n->as_LoadStore(); - if (lsn->has_barrier()) { - BasicType bt = lsn->in(MemNode::Address)->bottom_type()->basic_type(); - assert (is_reference_type(bt), "Sanity test"); - insert_barrier_before_unsafe(phase, lsn); - } - } - } - for (uint i = 0; i < n->len(); i++) { - if (n->in(i)) { - if (!visited.test_set(n->in(i)->_idx)) { - nodeStack.push(n->in(i)); - } - } - } - } - - igvn.optimize(); - C->print_method(PHASE_ADD_UNSAFE_BARRIER, 2); -} - -// The purpose of ZBarrierSetC2::clean_catch_blocks is to prepare the IR for -// splicing in load barrier nodes. -// -// The problem is that we might have instructions between a call and its catch nodes. -// (This is usually handled in PhaseCFG:call_catch_cleanup, which clones mach nodes in -// already scheduled blocks.) We can't have loads that require barriers there, -// because we need to splice in new control flow, and that would violate the IR. -// -// clean_catch_blocks find all Loads that require a barrier and clone them and any -// dependent instructions to each use. The loads must be in the beginning of the catch block -// before any store. -// -// Sometimes the loads use will be at a place dominated by all catch blocks, then we need -// a load in each catch block, and a Phi at the dominated use. 
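The comment above describes the removed pass's strategy: clone the load into each catch projection that needs it, lazily, recording clones in a per-projection mapping so no unnecessary copies are created. A toy model of that lazy per-handler cloning; CatchCloner and the node ids are illustrative, not HotSpot types:

#include <vector>
#include <iostream>

struct CatchCloner {
  int next_id;
  std::vector<int> proj_to_clone; // one slot per catch projection, -1 = none yet

  CatchCloner(int num_projs, int first_id)
    : next_id(first_id), proj_to_clone(num_projs, -1) {}

  // Return the clone for this catch projection, creating it on first use,
  // mirroring the lazy proj_to_load_mapping array in the removed code.
  int clone_for(int proj) {
    if (proj_to_clone[proj] < 0) {
      proj_to_clone[proj] = next_id++;
      std::cout << "cloned load as node " << proj_to_clone[proj]
                << " into catch proj " << proj << "\n";
    }
    return proj_to_clone[proj];
  }
};

int main() {
  CatchCloner cloner(3, 100); // a call with 3 catch projections
  cloner.clone_for(0);        // use reached via proj 0 creates a clone
  cloner.clone_for(0);        // second use on the same path reuses it
  cloner.clone_for(2);        // a different path gets its own clone
}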
- -void ZBarrierSetC2::clean_catch_blocks(PhaseIdealLoop* phase, bool verify) const { - - Compile *C = phase->C; - uint new_ids = C->unique(); - PhaseIterGVN &igvn = phase->igvn(); - VectorSet visited(Thread::current()->resource_area()); - GrowableArray nodeStack(Thread::current()->resource_area(), 0, 0, NULL); - nodeStack.push(C->root()); - visited.test_set(C->root()->_idx); - - // Traverse all nodes, visit all loads that require a barrier - while(nodeStack.length() > 0) { - Node *n = nodeStack.pop(); - - for (uint i = 0; i < n->len(); i++) { - if (n->in(i)) { - if (!visited.test_set(n->in(i)->_idx)) { - nodeStack.push(n->in(i)); - } + // If this node tracks liveness, update it + RegMask* const regs = barrier_set_state()->live(node); + if (regs != NULL) { + regs->OR(new_live); } } - bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup - if (n->is_Load() && is_old_node) { - LoadNode* load = n->isa_Load(); - // only care about loads that will have a barrier - if (load_require_barrier(load)) { - process_catch_cleanup_candidate(phase, load, verify); - } - } - } - - C->print_method(PHASE_CALL_CATCH_CLEANUP, 2); -} - -class DomDepthCompareClosure : public CompareClosure { - PhaseIdealLoop* _phase; - -public: - DomDepthCompareClosure(PhaseIdealLoop* phase) : _phase(phase) { } - - int do_compare(LoadNode* const &n1, LoadNode* const &n2) { - int d1 = _phase->dom_depth(_phase->get_ctrl(n1)); - int d2 = _phase->dom_depth(_phase->get_ctrl(n2)); - if (d1 == d2) { - // Compare index if the depth is the same, ensures all entries are unique. - return n1->_idx - n2->_idx; - } else { - return d2 - d1; - } - } -}; - -// Traverse graph and add all loadPs to list, sorted by dom depth -void gather_loadnodes_sorted(PhaseIdealLoop* phase, GrowableArray* loadList) { - - VectorSet visited(Thread::current()->resource_area()); - GrowableArray nodeStack(Thread::current()->resource_area(), 0, 0, NULL); - DomDepthCompareClosure ddcc(phase); - - nodeStack.push(phase->C->root()); - while(nodeStack.length() > 0) { - Node *n = nodeStack.pop(); - if (visited.test(n->_idx)) { - continue; - } - - if (n->isa_Load()) { - LoadNode *load = n->as_Load(); - if (load_require_barrier(load)) { - assert(phase->get_ctrl(load) != NULL, "sanity"); - assert(phase->dom_depth(phase->get_ctrl(load)) != 0, "sanity"); - loadList->insert_sorted(&ddcc, load); - } - } - - visited.set(n->_idx); - for (uint i = 0; i < n->req(); i++) { - if (n->in(i)) { - if (!visited.test(n->in(i)->_idx)) { - nodeStack.push(n->in(i)); - } + // Now at block top, see if we have any changes + new_live.SUBTRACT(old_live); + if (new_live.is_NotEmpty()) { + // Liveness has refined, update and propagate to prior blocks + old_live.OR(new_live); + for (uint i = 1; i < block->num_preds(); ++i) { + Block* const pred = cfg->get_block_for_node(block->pred(i)); + worklist.push(pred); } } } } - -// Add LoadBarriers to all LoadPs -void ZBarrierSetC2::insert_load_barriers(PhaseIdealLoop* phase) const { - - bool trace = phase->C->directive()->ZTraceLoadBarriersOption; - GrowableArray loadList(Thread::current()->resource_area(), 0, 0, NULL); - gather_loadnodes_sorted(phase, &loadList); - - PhaseIterGVN &igvn = phase->igvn(); - int count = 0; - - for (GrowableArrayIterator loadIter = loadList.begin(); loadIter != loadList.end(); ++loadIter) { - LoadNode *load = *loadIter; - - if (load_has_expanded_barrier(load)) { - continue; - } - - do { - // Insert a barrier on a loadP - // if another load is found that needs to be expanded first, retry on 
that one - LoadNode* result = insert_one_loadbarrier(phase, load, phase->get_ctrl(load)); - while (result != NULL) { - result = insert_one_loadbarrier(phase, result, phase->get_ctrl(result)); - } - } while (!load_has_expanded_barrier(load)); - } - - phase->C->print_method(PHASE_INSERT_BARRIER, 2); -} - -void push_antidependent_stores(PhaseIdealLoop* phase, Node_Stack& nodestack, LoadNode* start_load) { - // push all stores on the same mem, that can_alias - // Any load found must be handled first - PhaseIterGVN &igvn = phase->igvn(); - int load_alias_idx = igvn.C->get_alias_index(start_load->adr_type()); - - Node *mem = start_load->in(1); - for (DUIterator_Fast imax, u = mem->fast_outs(imax); u < imax; u++) { - Node *mem_use = mem->fast_out(u); - - if (mem_use == start_load) continue; - if (!mem_use->is_Store()) continue; - if (!phase->has_ctrl(mem_use)) continue; - if (phase->get_ctrl(mem_use) != phase->get_ctrl(start_load)) continue; - - // add any aliasing store in this block - StoreNode *store = mem_use->isa_Store(); - const TypePtr *adr_type = store->adr_type(); - if (igvn.C->can_alias(adr_type, load_alias_idx)) { - nodestack.push(store, 0); - } - } -} - -LoadNode* ZBarrierSetC2::insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* start_load, Node* ctrl) const { - bool trace = phase->C->directive()->ZTraceLoadBarriersOption; - PhaseIterGVN &igvn = phase->igvn(); - - // Check for other loadPs at the same loop depth that is reachable by a DFS - // - if found - return it. It needs to be inserted first - // - otherwise proceed and insert barrier - - VectorSet visited(Thread::current()->resource_area()); - Node_Stack nodestack(100); - - nodestack.push(start_load, 0); - push_antidependent_stores(phase, nodestack, start_load); - - while(!nodestack.is_empty()) { - Node* n = nodestack.node(); // peek - nodestack.pop(); - if (visited.test(n->_idx)) { - continue; - } - - if (n->is_Load() && n != start_load && load_require_barrier(n->as_Load()) && !load_has_expanded_barrier(n->as_Load())) { - // Found another load that needs a barrier in the same block. Must expand later loads first. - if (trace) tty->print_cr(" * Found LoadP %i on DFS", n->_idx); - return n->as_Load(); // return node that should be expanded first - } - - if (!phase->has_ctrl(n)) continue; - if (phase->get_ctrl(n) != phase->get_ctrl(start_load)) continue; - if (n->is_Phi()) continue; - - visited.set(n->_idx); - // push all children - for (DUIterator_Fast imax, ii = n->fast_outs(imax); ii < imax; ii++) { - Node* c = n->fast_out(ii); - if (c != NULL) { - nodestack.push(c, 0); - } - } - } - - insert_one_loadbarrier_inner(phase, start_load, ctrl, visited); - return NULL; -} - -void ZBarrierSetC2::insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited2) const { - PhaseIterGVN &igvn = phase->igvn(); - Compile* C = igvn.C; - bool trace = C->directive()->ZTraceLoadBarriersOption; - - // create barrier - Node* barrier = new LoadBarrierNode(C, NULL, load->in(LoadNode::Memory), NULL, load->in(LoadNode::Address), load_has_weak_barrier(load)); - Node* barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop); - Node* barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control); - ctrl = normalize_ctrl(ctrl); - - if (trace) tty->print_cr("Insert load %i with barrier: %i and ctrl : %i", load->_idx, barrier->_idx, ctrl->_idx); - - // Splice control - // - insert barrier control diamond between loads ctrl and ctrl successor on path to block end. 
- // - If control successor is a catch, step over to next. - Node* ctrl_succ = NULL; - for (DUIterator_Fast imax, j = ctrl->fast_outs(imax); j < imax; j++) { - Node* tmp = ctrl->fast_out(j); - - // - CFG nodes is the ones we are going to splice (1 only!) - // - Phi nodes will continue to hang from the region node! - // - self loops should be skipped - if (tmp->is_Phi() || tmp == ctrl) { - continue; - } - - if (tmp->is_CFG()) { - assert(ctrl_succ == NULL, "There can be only one"); - ctrl_succ = tmp; - continue; - } - } - - // Now splice control - assert(ctrl_succ != load, "sanity"); - assert(ctrl_succ != NULL, "Broken IR"); - bool found = false; - for(uint k = 0; k < ctrl_succ->req(); k++) { - if (ctrl_succ->in(k) == ctrl) { - assert(!found, "sanity"); - if (trace) tty->print_cr(" Move CFG ctrl_succ %i to barrier_ctrl", ctrl_succ->_idx); - igvn.replace_input_of(ctrl_succ, k, barrier_ctrl); - found = true; - k--; - } - } - - // For all successors of ctrl - move all visited to become successors of barrier_ctrl instead - for (DUIterator_Fast imax, r = ctrl->fast_outs(imax); r < imax; r++) { - Node* tmp = ctrl->fast_out(r); - if (tmp->is_SafePoint() || (visited2.test(tmp->_idx) && (tmp != load))) { - if (trace) tty->print_cr(" Move ctrl_succ %i to barrier_ctrl", tmp->_idx); - igvn.replace_input_of(tmp, 0, barrier_ctrl); - --r; --imax; - } - } - - // Move the loads user to the barrier - for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) { - Node* u = load->fast_out(i); - if (u->isa_LoadBarrier()) { - continue; - } - - // find correct input - replace with iterator? - for(uint j = 0; j < u->req(); j++) { - if (u->in(j) == load) { - igvn.replace_input_of(u, j, barrier_val); - --i; --imax; // Adjust the iterator of the *outer* loop - break; // some nodes (calls) might have several uses from the same node - } - } - } - - // Connect barrier to load and control - barrier->set_req(LoadBarrierNode::Oop, load); - barrier->set_req(LoadBarrierNode::Control, ctrl); - - igvn.replace_input_of(load, MemNode::Control, ctrl); - load->pin(); - - igvn.rehash_node_delayed(load); - igvn.register_new_node_with_optimizer(barrier); - igvn.register_new_node_with_optimizer(barrier_val); - igvn.register_new_node_with_optimizer(barrier_ctrl); - load_set_expanded_barrier(load); - - C->print_method(PHASE_INSERT_BARRIER, 3, load->_idx); -} - -// The bad_mask in the ThreadLocalData shouldn't have an anti-dep-check. -// The bad_mask address if of type TypeRawPtr, but that will alias -// InitializeNodes until the type system is expanded. 
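Whether expanded as an in-IR diamond (the removed expand_loadbarrier_node) or as the new out-of-line stubs, the barrier itself is one test of the loaded reference against a thread-local bad mask, with an unlikely branch to a slow path. A minimal standalone model of that fast path; the mask bit and the "healing" below are made up for illustration, real ZGC masks select the colored address bits:

#include <cstdint>
#include <iostream>

static const uintptr_t kBadMask = uintptr_t(1) << 60; // illustrative color bit

static uintptr_t slow_path(uintptr_t ref) {
  // A real slow path would heal the reference through the ZGC runtime;
  // here we simply clear the bad bit to model "remapping".
  return ref & ~kBadMask;
}

static uintptr_t load_barrier(uintptr_t ref) {
  if ((ref & kBadMask) != 0) { // unlikely branch -> out-of-line stub
    ref = slow_path(ref);
  }
  return ref;                  // fast path: a single test-and-branch
}

int main() {
  uintptr_t good = 0x1000;
  uintptr_t bad  = good | kBadMask;
  std::cout << std::hex << load_barrier(good) << " " << load_barrier(bad) << "\n";
}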
-bool ZBarrierSetC2::needs_anti_dependence_check(const Node* node) const { - MachNode* mnode = node->as_Mach(); - if (mnode != NULL) { - intptr_t offset = 0; - const TypePtr *adr_type2 = NULL; - const Node* base = mnode->get_base_and_disp(offset, adr_type2); - if ((base != NULL) && - (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_ThreadLocal) && - (offset == in_bytes(ZThreadLocalData::address_bad_mask_offset()))) { - return false; - } - } - return true; -} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -29,134 +29,38 @@ #include "opto/node.hpp" #include "utilities/growableArray.hpp" -class ZCompareAndSwapPNode : public CompareAndSwapPNode { -public: - ZCompareAndSwapPNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { } - virtual int Opcode() const; -}; - -class ZWeakCompareAndSwapPNode : public WeakCompareAndSwapPNode { -public: - ZWeakCompareAndSwapPNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { } - virtual int Opcode() const; -}; +const uint8_t ZLoadBarrierStrong = 1; +const uint8_t ZLoadBarrierWeak = 2; +const uint8_t ZLoadBarrierElided = 3; -class ZCompareAndExchangePNode : public CompareAndExchangePNode { -public: - ZCompareAndExchangePNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { } - virtual int Opcode() const; -}; +class ZLoadBarrierStubC2 : public ResourceObj { +private: + const MachNode* _node; + const Address _ref_addr; + const Register _ref; + const Register _tmp; + const bool _weak; + Label _entry; + Label _continuation; -class ZGetAndSetPNode : public GetAndSetPNode { -public: - ZGetAndSetPNode(Node* c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t) : GetAndSetPNode(c, mem, adr, val, at, t) { } - virtual int Opcode() const; -}; - -class LoadBarrierNode : public MultiNode { -private: - bool _weak; // On strong or weak oop reference - static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n); - void push_dominated_barriers(PhaseIterGVN* igvn) const; + ZLoadBarrierStubC2(const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak); public: - enum { - Control, - Memory, - Oop, - Address, - Number_of_Outputs = Address, - Similar, - Number_of_Inputs - }; - - LoadBarrierNode(Compile* C, - Node* c, - Node* mem, - Node* val, - Node* adr, - bool weak); - - virtual int Opcode() const; - virtual uint size_of() const; - virtual bool cmp(const Node& n) const; - virtual const Type *bottom_type() const; - virtual const TypePtr* adr_type() const; - virtual const Type *Value(PhaseGVN *phase) const; - virtual Node *Identity(PhaseGVN *phase); - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); - virtual uint match_edge(uint idx) const; - - LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase, - bool linear_only, - bool look_for_similar); - - void fix_similar_in_uses(PhaseIterGVN* igvn); - - bool has_true_uses() const; - - bool can_be_eliminated() const { - return !in(Similar)->is_top(); - } - - bool is_weak() const { - return _weak; - } -}; + static ZLoadBarrierStubC2* create(const MachNode* node, 
Address ref_addr, Register ref, Register tmp, bool weak); -class LoadBarrierSlowRegNode : public TypeNode { -private: - bool _is_weak; -public: - LoadBarrierSlowRegNode(Node *c, - Node *adr, - Node *src, - const TypePtr* t, - bool weak) : - TypeNode(t, 3), _is_weak(weak) { - init_req(1, adr); - init_req(2, src); - init_class_id(Class_LoadBarrierSlowReg); - } - - virtual uint size_of() const { - return sizeof(*this); - } - - virtual const char * name() { - return "LoadBarrierSlowRegNode"; - } - - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { - return NULL; - } - - virtual int Opcode() const; - - bool is_weak() { return _is_weak; } -}; - -class ZBarrierSetC2State : public ResourceObj { -private: - // List of load barrier nodes which need to be expanded before matching - GrowableArray* _load_barrier_nodes; - -public: - ZBarrierSetC2State(Arena* comp_arena); - int load_barrier_count() const; - void add_load_barrier_node(LoadBarrierNode* n); - void remove_load_barrier_node(LoadBarrierNode* n); - LoadBarrierNode* load_barrier_node(int idx) const; + Address ref_addr() const; + Register ref() const; + Register tmp() const; + address slow_path() const; + RegMask& live() const; + Label* entry(); + Label* continuation(); }; class ZBarrierSetC2 : public BarrierSetC2 { private: - ZBarrierSetC2State* state() const; - void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const; - -#ifdef ASSERT - void verify_gc_barriers(bool post_parse) const; -#endif + void compute_liveness_at_stubs() const; + void analyze_dominating_barriers() const; protected: virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; @@ -174,43 +78,14 @@ public: virtual void* create_barrier_state(Arena* comp_arena) const; - - virtual bool has_load_barriers() const { return true; } - virtual bool is_gc_barrier_node(Node* node) const; - virtual Node* step_over_gc_barrier(Node* c) const; - virtual Node* step_over_gc_barrier_ctrl(Node* c) const; - - virtual void register_potential_barrier_node(Node* node) const; - virtual void unregister_potential_barrier_node(Node* node) const; - virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { } - virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const; - virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const; - - virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const; + virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, + BasicType type, + bool is_clone, + ArrayCopyPhase phase) const; - virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const; - virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const; - virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const; - virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const; - virtual bool needs_anti_dependence_check(const Node* node) const; - -#ifdef ASSERT - virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const; -#endif - - // Load barrier insertion and expansion external - virtual void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const; - virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const; - virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) 
const { return (mode == LoopOptsZBarrierInsertion); } - virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return mode == LoopOptsZBarrierInsertion; } - -private: - // Load barrier insertion and expansion internal - void insert_barriers_on_unsafe(PhaseIdealLoop* phase) const; - void clean_catch_blocks(PhaseIdealLoop* phase, bool verify = false) const; - void insert_load_barriers(PhaseIdealLoop* phase) const; - LoadNode* insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) const; - void insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited) const; + virtual void late_barrier_analysis() const; + virtual int estimate_stub_size() const; + virtual void emit_stubs(CodeBuffer& cb) const; }; #endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/zBarrierSetAssembler.hpp --- a/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/z/zBarrierSetAssembler.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,10 +24,7 @@ #ifndef SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP #define SHARE_GC_Z_ZBARRIERSETASSEMBLER_HPP -#include "asm/macroAssembler.hpp" #include "gc/shared/barrierSetAssembler.hpp" -#include "oops/accessDecorators.hpp" -#include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" class ZBarrierSetAssemblerBase : public BarrierSetAssembler { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/zLock.inline.hpp --- a/src/hotspot/share/gc/z/zLock.inline.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/z/zLock.inline.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -80,12 +80,16 @@ template inline ZLocker::ZLocker(T* lock) : _lock(lock) { - _lock->lock(); + if (_lock != NULL) { + _lock->lock(); + } } template inline ZLocker::~ZLocker() { - _lock->unlock(); + if (_lock != NULL) { + _lock->unlock(); + } } #endif // SHARE_GC_Z_ZLOCK_INLINE_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/zNMethodTable.cpp --- a/src/hotspot/share/gc/z/zNMethodTable.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/z/zNMethodTable.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -51,7 +51,7 @@ size_t ZNMethodTable::_nregistered = 0; size_t ZNMethodTable::_nunregistered = 0; ZNMethodTableIteration ZNMethodTable::_iteration; -ZSafeDelete ZNMethodTable::_safe_delete; +ZSafeDeleteNoLock ZNMethodTable::_safe_delete; size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) { assert(is_power_of_2(size), "Invalid size"); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/zNMethodTable.hpp --- a/src/hotspot/share/gc/z/zNMethodTable.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/gc/z/zNMethodTable.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -35,12 +35,12 @@ class ZNMethodTable : public AllStatic { private: - static ZNMethodTableEntry* _table; - static size_t _size; - static size_t _nregistered; - static size_t _nunregistered; - static ZNMethodTableIteration _iteration; - static ZSafeDelete _safe_delete; + static ZNMethodTableEntry* _table; + static size_t _size; + static size_t _nregistered; + static size_t _nunregistered; + static ZNMethodTableIteration _iteration; + static ZSafeDeleteNoLock 
<ZNMethodTableEntry[]> _safe_delete;
 
   static ZNMethodTableEntry* create(size_t size);
   static void destroy(ZNMethodTableEntry* table);
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/zSafeDelete.hpp
--- a/src/hotspot/share/gc/z/zSafeDelete.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/gc/z/zSafeDelete.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -29,11 +29,11 @@
 #include "metaprogramming/removeExtent.hpp"
 
 template <typename T>
-class ZSafeDelete {
+class ZSafeDeleteImpl {
 private:
   typedef typename RemoveExtent<T>::type ItemT;
 
-  ZLock          _lock;
+  ZLock*         _lock;
   uint64_t       _enabled;
   ZArray<ItemT*> _deferred;
 
@@ -41,7 +41,7 @@
   void immediate_delete(ItemT* item);
 
 public:
-  ZSafeDelete();
+  ZSafeDeleteImpl(ZLock* lock);
 
   void enable_deferred_delete();
   void disable_deferred_delete();
@@ -49,4 +49,19 @@
   void operator()(ItemT* item);
 };
 
+template <typename T>
+class ZSafeDelete : public ZSafeDeleteImpl<T> {
+private:
+  ZLock _lock;
+
+public:
+  ZSafeDelete();
+};
+
+template <typename T>
+class ZSafeDeleteNoLock : public ZSafeDeleteImpl<T> {
+public:
+  ZSafeDeleteNoLock();
+};
+
 #endif // SHARE_GC_Z_ZSAFEDELETE_HPP
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/zSafeDelete.inline.hpp
--- a/src/hotspot/share/gc/z/zSafeDelete.inline.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/gc/z/zSafeDelete.inline.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -30,14 +30,14 @@
 #include "utilities/debug.hpp"
 
 template <typename T>
-ZSafeDelete<T>::ZSafeDelete() :
-    _lock(),
+ZSafeDeleteImpl<T>::ZSafeDeleteImpl(ZLock* lock) :
+    _lock(lock),
     _enabled(0),
     _deferred() {}
 
 template <typename T>
-bool ZSafeDelete<T>::deferred_delete(ItemT* item) {
-  ZLocker<ZLock> locker(&_lock);
+bool ZSafeDeleteImpl<T>::deferred_delete(ItemT* item) {
+  ZLocker<ZLock> locker(_lock);
   if (_enabled > 0) {
     _deferred.add(item);
     return true;
@@ -47,7 +47,7 @@
 }
 
 template <typename T>
-void ZSafeDelete<T>::immediate_delete(ItemT* item) {
+void ZSafeDeleteImpl<T>::immediate_delete(ItemT* item) {
   if (IsArray<T>::value) {
     delete [] item;
   } else {
@@ -56,17 +56,17 @@
 }
 
 template <typename T>
-void ZSafeDelete<T>::enable_deferred_delete() {
-  ZLocker<ZLock> locker(&_lock);
+void ZSafeDeleteImpl<T>::enable_deferred_delete() {
+  ZLocker<ZLock> locker(_lock);
   _enabled++;
 }
 
 template <typename T>
-void ZSafeDelete<T>::disable_deferred_delete() {
+void ZSafeDeleteImpl<T>::disable_deferred_delete() {
   ZArray<ItemT*> deferred;
 
   {
-    ZLocker<ZLock> locker(&_lock);
+    ZLocker<ZLock> locker(_lock);
     assert(_enabled > 0, "Invalid state");
     if (--_enabled == 0) {
       deferred.transfer(&_deferred);
@@ -80,10 +80,19 @@
 }
 
 template <typename T>
-void ZSafeDelete<T>::operator()(ItemT* item) {
+void ZSafeDeleteImpl<T>::operator()(ItemT* item) {
   if (!deferred_delete(item)) {
     immediate_delete(item);
   }
 }
 
+template <typename T>
+ZSafeDelete<T>::ZSafeDelete() :
+    ZSafeDeleteImpl<T>(&_lock),
+    _lock() {}
+
+template <typename T>
+ZSafeDeleteNoLock<T>::ZSafeDeleteNoLock() :
+    ZSafeDeleteImpl<T>(NULL) {}
+
 #endif // SHARE_GC_Z_ZSAFEDELETE_INLINE_HPP
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/gc/z/z_globals.hpp
--- a/src/hotspot/share/gc/z/z_globals.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/gc/z/z_globals.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -86,9 +86,6 @@
           "Verify marking stacks")                                  \
                                                                     \
   diagnostic(bool, ZVerifyForwarding, false,                        \
-          "Verify forwarding tables")                               \
-                                                                    \
-  develop(bool, ZVerifyLoadBarriers, false,                         \
-          "Verify that reference loads are followed by barriers")
+          "Verify forwarding tables")
 
 #endif // SHARE_GC_Z_Z_GLOBALS_HPP
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/include/jvm.h
--- a/src/hotspot/share/include/jvm.h Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/include/jvm.h Wed Oct 16 15:31:05 2019 +0200
@@ -1044,19 +1044,6 @@
 #include "classfile_constants.h"
 
 /*
- * A function
defined by the byte-code verifier and called by the VM. - * This is not a function implemented in the VM. - * - * Returns JNI_FALSE if verification fails. A detailed error message - * will be places in msg_buf, whose length is specified by buf_len. - */ -typedef jboolean (*verifier_fn_t)(JNIEnv *env, - jclass cb, - char * msg_buf, - jint buf_len); - - -/* * Support for a VM-independent class format checker. */ typedef struct { @@ -1086,28 +1073,6 @@ typedef jstring (*to_java_string_fn_t)(JNIEnv *env, char *str); -typedef char *(*to_c_string_fn_t)(JNIEnv *env, jstring s, jboolean *b); - -/* This is the function defined in libjava.so that performs class - * format checks. This functions fills in size information about - * the class file and returns: - * - * 0: good - * -1: out of memory - * -2: bad format - * -3: unsupported version - * -4: bad class name - */ - -typedef jint (*check_format_fn_t)(char *class_name, - unsigned char *data, - unsigned int data_size, - class_size_info *class_size, - char *message_buffer, - jint buffer_length, - jboolean measure_only, - jboolean check_relaxed); - #define JVM_RECOGNIZED_CLASS_MODIFIERS (JVM_ACC_PUBLIC | \ JVM_ACC_FINAL | \ JVM_ACC_SUPER | \ diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/interpreter/interpreterRuntime.cpp --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -28,6 +28,7 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/barrierSetNMethod.hpp" @@ -52,7 +53,6 @@ #include "prims/nativeLookup.hpp" #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/interpreter/linkResolver.cpp --- a/src/hotspot/share/interpreter/linkResolver.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/interpreter/linkResolver.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -30,6 +30,7 @@ #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/bootstrapInfo.hpp" @@ -48,7 +49,6 @@ #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp --- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -393,6 +393,10 @@ Service_lock->unlock(); } + if (UseNotificationThread && Notification_lock->owned_by_self()) { + Notification_lock->unlock(); + } + if (CodeCache_lock->owned_by_self()) { CodeCache_lock->unlock(); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/jvmci/compilerRuntime.cpp --- a/src/hotspot/share/jvmci/compilerRuntime.cpp Mon Oct 07 16:48:42 2019 +0200 
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -25,11 +25,11 @@ #include "aot/aotLoader.hpp" #include "classfile/stringTable.hpp" #include "classfile/symbolTable.hpp" +#include "compiler/compilationPolicy.hpp" #include "interpreter/linkResolver.hpp" #include "jvmci/compilerRuntime.hpp" #include "oops/cpCache.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/jvmci/jvmciCodeInstaller.cpp --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -642,11 +642,9 @@ failed_speculations, speculations, speculations_len); cb = nm->as_codeblob_or_null(); if (nm != NULL && compile_state == NULL) { + // This compile didn't come through the CompileBroker so perform the printing here DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, compiler); - bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption; - if (!printnmethods && (PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers)) { - nm->print_nmethod(printnmethods); - } + nm->maybe_print_nmethod(directive); DirectivesStack::release(directive); } } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp --- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -31,6 +31,7 @@ #include "jvmci/vmStructs_jvmci.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.hpp" +#include "oops/klass.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/resourceHash.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp --- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -46,7 +46,7 @@ class CommitLimiter; class MetachunkListCluster; -// VirtualSpaceNode manage a single address range of the Metaspace. +// VirtualSpaceNode manages a single address range of the Metaspace. // // That address range may contain interleaved committed and uncommitted // regions. 
It keeps track of which regions have committed and offers
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/instanceKlass.hpp
--- a/src/hotspot/share/oops/instanceKlass.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/oops/instanceKlass.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -992,7 +992,6 @@
   void process_interfaces(Thread *thread);
 
   // virtual operations from Klass
-  bool is_leaf_class() const               { return _subklass == NULL; }
   GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots,
                                                   Array<InstanceKlass*>* transitive_interfaces);
   bool can_be_primary_super_slow() const;
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/klass.cpp
--- a/src/hotspot/share/oops/klass.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/oops/klass.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -57,10 +57,6 @@
   _java_mirror = class_loader_data()->add_handle(m);
 }
 
-oop Klass::java_mirror() const {
-  return _java_mirror.resolve();
-}
-
 oop Klass::java_mirror_no_keepalive() const {
   return _java_mirror.peek();
 }
@@ -681,8 +677,6 @@
   }
 }
 
-oop Klass::class_loader() const { return class_loader_data()->class_loader(); }
-
 // In product mode, this function doesn't have virtual function calls so
 // there might be some performance advantage to handling InstanceKlass here.
 const char* Klass::external_name() const {
@@ -826,14 +820,6 @@
   return ClassLoaderDataGraph::is_valid(k->class_loader_data());
 }
 
-klassVtable Klass::vtable() const {
-  return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
-}
-
-vtableEntry* Klass::start_of_vtable() const {
-  return (vtableEntry*) ((address)this + in_bytes(vtable_start_offset()));
-}
-
 Method* Klass::method_at_vtable(int index) {
 #ifndef PRODUCT
   assert(index >= 0, "valid vtable index");
@@ -844,9 +830,6 @@
   return start_of_vtable()[index].method();
 }
 
-ByteSize Klass::vtable_start_offset() {
-  return in_ByteSize(InstanceKlass::header_size() * wordSize);
-}
 
 
 #ifndef PRODUCT
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/klass.hpp
--- a/src/hotspot/share/oops/klass.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/oops/klass.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -469,8 +469,6 @@
   virtual bool should_be_initialized() const            { return false; }
   // initializes the klass
   virtual void initialize(TRAPS);
-  // lookup operation for MethodLookupCache
-  friend class MethodLookupCache;
   virtual Klass* find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
   virtual Method* uncached_lookup_method(const Symbol* name, const Symbol* signature,
                                          OverpassLookupMode overpass_mode,
@@ -537,9 +535,6 @@
   }
 
  public:
-  // subclass accessor (here for convenience; undefined for non-klass objects)
-  virtual bool is_leaf_class() const { fatal("not a class"); return false; }
-
  public:
   // ALL FUNCTIONS BELOW THIS POINT ARE DISPATCHED FROM AN OOP
   // These functions describe behavior for the oop not the KLASS.
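klass.cpp above loses the out-of-line definitions of java_mirror(), class_loader(), vtable(), start_of_vtable() and vtable_start_offset(); klass.inline.hpp below re-adds them as inline functions, which is why other files in this patch (library_call.cpp, jvmciCompilerToVMInit.cpp) gain an "oops/klass.inline.hpp" include. A minimal sketch of the .cpp-to-.inline.hpp move with hypothetical Foo names (not JDK code); the three sections below would normally live in three files but compile as one translation unit:

// --- foo.hpp: declaration only; callers that never use value()
// --- do not recompile when its definition changes
class Foo {
private:
  int _value;
public:
  Foo() : _value(42) {}
  int value() const;            // definition lives in foo.inline.hpp
};

// --- foo.inline.hpp: definition marked inline, so every including
// --- translation unit may instantiate it and the optimizer can inline it
inline int Foo::value() const {
  return _value;
}

// --- caller.cpp: must include foo.inline.hpp, not just foo.hpp
#include <cstdio>

int main() {
  Foo f;
  std::printf("%d\n", f.value()); // call site eligible for inlining
  return 0;
}

The trade-off is the usual one: hot accessors get inlined into their callers, in exchange for a wider include footprint (every caller of the accessor now depends on the .inline.hpp header and its transitive includes).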
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/klass.inline.hpp
--- a/src/hotspot/share/oops/klass.inline.hpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/oops/klass.inline.hpp Wed Oct 16 15:31:05 2019 +0200
@@ -25,13 +25,35 @@
 #ifndef SHARE_OOPS_KLASS_INLINE_HPP
 #define SHARE_OOPS_KLASS_INLINE_HPP
 
+#include "classfile/classLoaderData.inline.hpp"
 #include "oops/compressedOops.hpp"
 #include "oops/klass.hpp"
 #include "oops/markWord.hpp"
+#include "oops/oopHandle.inline.hpp"
 
 inline void Klass::set_prototype_header(markWord header) {
   assert(!header.has_bias_pattern() || is_instance_klass(), "biased locking currently only supported for Java instances");
   _prototype_header = header;
 }
 
+inline oop Klass::java_mirror() const {
+  return _java_mirror.resolve();
+}
+
+inline klassVtable Klass::vtable() const {
+  return klassVtable(const_cast<Klass*>(this), start_of_vtable(), vtable_length() / vtableEntry::size());
+}
+
+inline oop Klass::class_loader() const {
+  return class_loader_data()->class_loader();
+}
+
+inline vtableEntry* Klass::start_of_vtable() const {
+  return (vtableEntry*) ((address)this + in_bytes(vtable_start_offset()));
+}
+
+inline ByteSize Klass::vtable_start_offset() {
+  return in_ByteSize(InstanceKlass::header_size() * wordSize);
+}
+
 #endif // SHARE_OOPS_KLASS_INLINE_HPP
diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/klassVtable.cpp
--- a/src/hotspot/share/oops/klassVtable.cpp Mon Oct 07 16:48:42 2019 +0200
+++ b/src/hotspot/share/oops/klassVtable.cpp Wed Oct 16 15:31:05 2019 +0200
@@ -128,11 +128,6 @@
   *vtable_length_ret = vtable_length;
 }
 
-int klassVtable::index_of(Method* m, int len) const {
-  assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
-  return m->vtable_index();
-}
-
 // Copy super class's vtable to the first part (prefix) of this class's vtable,
 // and return the number of entries copied.  Expects that 'super' is the Java
 // super class (arrays can have "array" super classes that must be skipped).
@@ -169,7 +164,6 @@
 // Note: Arrays can have intermediate array supers. Use java_super to skip them.
InstanceKlass* super = _klass->java_super(); - int nofNewEntries = 0; bool is_shared = _klass->is_shared(); @@ -1029,15 +1023,6 @@ } #endif // INCLUDE_JVMTI -// CDS/RedefineClasses support - clear vtables so they can be reinitialized -void klassVtable::clear_vtable() { - for (int i = 0; i < _length; i++) table()[i].clear(); -} - -bool klassVtable::is_initialized() { - return _length == 0 || table()[0].method() != NULL; -} - //----------------------------------------------------------------------------------------- // Itable code @@ -1468,31 +1453,6 @@ #endif } - -// inverse to itable_index -Method* klassItable::method_for_itable_index(InstanceKlass* intf, int itable_index) { - assert(intf->is_interface(), "sanity check"); - assert(intf->verify_itable_index(itable_index), ""); - Array* methods = InstanceKlass::cast(intf)->methods(); - - if (itable_index < 0 || itable_index >= method_count_for_interface(intf)) - return NULL; // help caller defend against bad indices - - int index = itable_index; - Method* m = methods->at(index); - int index2 = -1; - while (!m->has_itable_index() || - (index2 = m->itable_index()) != itable_index) { - assert(index2 < itable_index, "monotonic"); - if (++index == methods->length()) - return NULL; - m = methods->at(index); - } - assert(m->itable_index() == itable_index, "correct inverse"); - - return m; -} - void klassVtable::verify(outputStream* st, bool forced) { // make sure table is initialized if (!Universe::is_fully_initialized()) return; @@ -1541,7 +1501,6 @@ #endif void vtableEntry::verify(klassVtable* vt, outputStream* st) { - NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true)); Klass* vtklass = vt->klass(); if (vtklass->is_instance_klass() && (InstanceKlass::cast(vtklass)->major_version() >= klassVtable::VTABLE_TRANSITIVE_OVERRIDE_VERSION)) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/klassVtable.hpp --- a/src/hotspot/share/oops/klassVtable.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/oops/klassVtable.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -48,13 +48,6 @@ int _verify_count; // to make verify faster #endif - // Ordering important, so greater_than (>) can be used as an merge operator. - enum AccessType { - acc_private = 0, - acc_package_private = 1, - acc_publicprotected = 2 - }; - public: klassVtable(Klass* klass, void* base, int length) : _klass(klass) { _tableOffset = (address)base - (address)klass; _length = length; @@ -66,22 +59,12 @@ int length() const { return _length; } inline Method* method_at(int i) const; inline Method* unchecked_method_at(int i) const; - inline Method** adr_method_at(int i) const; // searching; all methods return -1 if not found - int index_of(Method* m) const { return index_of(m, _length); } int index_of_miranda(Symbol* name, Symbol* signature); void initialize_vtable(bool checkconstraints, TRAPS); // initialize vtable of a new klass - // CDS/RedefineClasses support - clear vtables so they can be reinitialized - // at dump time. Clearing gives us an easy way to tell if the vtable has - // already been reinitialized at dump time (see dump.cpp). Vtables can - // be initialized at run time by RedefineClasses so dumping the right order - // is necessary. 
- void clear_vtable(); - bool is_initialized(); - // computes vtable length (in words) and the number of miranda methods static void compute_vtable_size_and_num_mirandas(int* vtable_length, int* num_new_mirandas, @@ -125,7 +108,6 @@ private: void copy_vtable_to(vtableEntry* start); int initialize_from_super(Klass* super); - int index_of(Method* m, int len) const; // same as index_of, but search only up to len void put_method_at(Method* m, int index); static bool needs_new_vtable_entry(const methodHandle& m, const Klass* super, @@ -223,12 +205,6 @@ return table()[i].method(); } -inline Method** klassVtable::adr_method_at(int i) const { - // Allow one past the last entry to be referenced; useful for loop bounds. - assert(i >= 0 && i <= _length, "index out of bounds"); - return (Method**)(address(table() + i) + vtableEntry::method_offset_in_bytes()); -} - // -------------------------------------------------------------------------------- class klassItable; class itableMethodEntry; @@ -333,9 +309,6 @@ static int compute_itable_size(Array* transitive_interfaces); static void setup_itable_offset_table(InstanceKlass* klass); - // Resolving of method to index - static Method* method_for_itable_index(InstanceKlass* klass, int itable_index); - // Debugging/Statistics static void print_statistics() PRODUCT_RETURN; private: diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/method.cpp --- a/src/hotspot/share/oops/method.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/oops/method.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -28,6 +28,7 @@ #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "code/debugInfoRec.hpp" +#include "compiler/compilationPolicy.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/bytecodeTracer.hpp" @@ -54,7 +55,6 @@ #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" #include "runtime/arguments.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/oops/methodData.cpp --- a/src/hotspot/share/oops/methodData.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/oops/methodData.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/compilerOracle.hpp" #include "interpreter/bytecode.hpp" #include "interpreter/bytecodeStream.hpp" @@ -34,7 +35,6 @@ #include "oops/methodData.inline.hpp" #include "prims/jvmtiRedefineClasses.hpp" #include "runtime/arguments.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" #include "runtime/orderAccess.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/buildOopMap.cpp --- a/src/hotspot/share/opto/buildOopMap.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/buildOopMap.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -352,7 +352,6 @@ } else { // Other - some reaching non-oop value - omap->set_value( r); #ifdef ASSERT if( t->isa_rawptr() && C->cfg()->_raw_oops.member(def) ) { def->dump(); @@ -377,11 +376,18 @@ #endif #ifdef ASSERT - for( OopMapStream oms1(omap, OopMapValue::derived_oop_value); !oms1.is_done(); oms1.next()) { + for( OopMapStream oms1(omap); !oms1.is_done(); oms1.next()) { OopMapValue omv1 = oms1.current(); + if (omv1.type() != 
OopMapValue::derived_oop_value) { + continue; + } bool found = false; - for( OopMapStream oms2(omap,OopMapValue::oop_value); !oms2.is_done(); oms2.next()) { - if( omv1.content_reg() == oms2.current().reg() ) { + for( OopMapStream oms2(omap); !oms2.is_done(); oms2.next()) { + OopMapValue omv2 = oms2.current(); + if (omv2.type() != OopMapValue::oop_value) { + continue; + } + if( omv1.content_reg() == omv2.reg() ) { found = true; break; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/c2compiler.cpp --- a/src/hotspot/share/opto/c2compiler.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/c2compiler.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -102,7 +102,8 @@ assert(is_initialized(), "Compiler thread must be initialized"); bool subsume_loads = SubsumeLoads; - bool do_escape_analysis = DoEscapeAnalysis && !env->should_retain_local_variables(); + bool do_escape_analysis = DoEscapeAnalysis && !env->should_retain_local_variables() + && !env->jvmti_can_get_owned_monitor_info(); bool eliminate_boxing = EliminateAutoBox; while (!env->failing()) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/classes.cpp --- a/src/hotspot/share/opto/classes.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/classes.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -48,9 +48,6 @@ #include "opto/subnode.hpp" #include "opto/vectornode.hpp" #include "utilities/macros.hpp" -#if INCLUDE_ZGC -#include "gc/z/c2/zBarrierSetC2.hpp" -#endif #if INCLUDE_SHENANDOAHGC #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp" #endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/classes.hpp --- a/src/hotspot/share/opto/classes.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/classes.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -193,17 +193,6 @@ macro(LoadN) macro(LoadRange) macro(LoadS) -#if INCLUDE_ZGC -#define zgcmacro(x) macro(x) -#else -#define zgcmacro(x) optionalmacro(x) -#endif -zgcmacro(LoadBarrier) -zgcmacro(LoadBarrierSlowReg) -zgcmacro(ZCompareAndSwapP) -zgcmacro(ZWeakCompareAndSwapP) -zgcmacro(ZCompareAndExchangeP) -zgcmacro(ZGetAndSetP) macro(Lock) macro(Loop) macro(LoopLimit) diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/compile.cpp --- a/src/hotspot/share/opto/compile.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/compile.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -76,9 +76,6 @@ #include "utilities/align.hpp" #include "utilities/copy.hpp" #include "utilities/macros.hpp" -#if INCLUDE_ZGC -#include "gc/z/c2/zBarrierSetC2.hpp" -#endif // -------------------- Compile::mach_constant_base_node ----------------------- @@ -990,6 +987,7 @@ _has_method_handle_invokes(false), _clinit_barrier_on_entry(false), _comp_arena(mtCompiler), + _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())), _env(ci_env), _directive(directive), _log(ci_env->log()), @@ -2412,13 +2410,6 @@ print_method(PHASE_MACRO_EXPANSION, 2); } -#ifdef ASSERT - bs->verify_gc_barriers(this, BarrierSetC2::BeforeLateInsertion); -#endif - - bs->barrier_insertion_phase(C, igvn); - if (failing()) return; - { TracePhase tp("barrierExpand", &timers[_t_barrierExpand]); if (bs->expand_barriers(this, igvn)) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/compile.hpp --- a/src/hotspot/share/opto/compile.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/compile.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -55,7 +55,6 @@ class IdealGraphPrinter; class InlineTree; class Int_Array; -class LoadBarrierNode; class Matcher; class 
MachConstantNode; class MachConstantBaseNode; @@ -96,7 +95,6 @@ LoopOptsNone, LoopOptsShenandoahExpand, LoopOptsShenandoahPostExpand, - LoopOptsZBarrierInsertion, LoopOptsSkipSplitIf, LoopOptsVerify }; @@ -1186,11 +1184,7 @@ bool in_scratch_emit_size() const { return _in_scratch_emit_size; } enum ScratchBufferBlob { -#if defined(PPC64) MAX_inst_size = 2048, -#else - MAX_inst_size = 1024, -#endif MAX_locs_size = 128, // number of relocInfo elements MAX_const_size = 128, MAX_stubs_size = 128 @@ -1265,14 +1259,30 @@ // Process an OopMap Element while emitting nodes void Process_OopMap_Node(MachNode *mach, int code_offset); + class BufferSizingData { + public: + int _stub; + int _code; + int _const; + int _reloc; + + BufferSizingData() : + _stub(0), + _code(0), + _const(0), + _reloc(0) + { }; + }; + // Initialize code buffer - CodeBuffer* init_buffer(uint* blk_starts); + void estimate_buffer_size(int& const_req); + CodeBuffer* init_buffer(BufferSizingData& buf_sizes); // Write out basic block data to code buffer void fill_buffer(CodeBuffer* cb, uint* blk_starts); // Determine which variable sized branches can be shortened - void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size); + void shorten_branches(uint* blk_starts, BufferSizingData& buf_sizes); // Compute the size of first NumberOfLoopInstrToAlign instructions // at the head of a loop. diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/library_call.cpp --- a/src/hotspot/share/opto/library_call.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/library_call.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "gc/shared/barrierSet.hpp" #include "jfr/support/jfrIntrinsics.hpp" #include "memory/resourceArea.hpp" +#include "oops/klass.inline.hpp" #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" #include "opto/arraycopynode.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/loopTransform.cpp --- a/src/hotspot/share/opto/loopTransform.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/loopTransform.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -3129,6 +3129,13 @@ // We also need to replace the original limit to collapse loop exit. Node* cmp = cl->loopexit()->cmp_node(); assert(cl->limit() == cmp->in(2), "sanity"); + // Duplicate cmp node if it has other users + if (cmp->outcnt() > 1) { + cmp = cmp->clone(); + cmp = phase->_igvn.register_new_node_with_optimizer(cmp); + BoolNode *bol = cl->loopexit()->in(CountedLoopEndNode::TestValue)->as_Bool(); + phase->_igvn.replace_input_of(bol, 1, cmp); // put bol on worklist + } phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/loopnode.cpp --- a/src/hotspot/share/opto/loopnode.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/loopnode.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -993,18 +993,6 @@ } } - if (UseZGC && !inner_out->in(0)->is_CountedLoopEnd()) { - // In some very special cases there can be a load that has no other uses than the - // counted loop safepoint. Then its loadbarrier will be placed between the inner - // loop exit and the safepoint. 
This is very rare - - Node* ifnode = inner_out->in(1)->in(0); - // Region->IfTrue->If == Region->Iffalse->If - if (ifnode == inner_out->in(2)->in(0)) { - inner_out = ifnode->in(0); - } - } - CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd(); assert(cle == inner->loopexit_or_null(), "mismatch"); bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/loopopts.cpp --- a/src/hotspot/share/opto/loopopts.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/loopopts.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -41,9 +41,6 @@ #include "opto/rootnode.hpp" #include "opto/subnode.hpp" #include "utilities/macros.hpp" -#if INCLUDE_ZGC -#include "gc/z/c2/zBarrierSetC2.hpp" -#endif //============================================================================= //------------------------------split_thru_phi--------------------------------- diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/machnode.hpp --- a/src/hotspot/share/opto/machnode.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/machnode.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -197,7 +197,7 @@ // ADLC inherit from this class. class MachNode : public Node { public: - MachNode() : Node((uint)0), _num_opnds(0), _opnds(NULL) { + MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(NULL) { init_class_id(Class_Mach); } // Required boilerplate @@ -211,6 +211,9 @@ // no constant base node input. virtual uint mach_constant_base_node_input() const { return (uint)-1; } + uint8_t barrier_data() const { return _barrier; } + void set_barrier_data(uint data) { _barrier = data; } + // Copy inputs and operands to new node of instruction. // Called from cisc_version() and short_branch_version(). // !!!! The method's body is defined in ad_.cpp file. @@ -255,6 +258,9 @@ // output have choices - but they must use the same choice. virtual uint two_adr( ) const { return 0; } + // The GC might require some barrier metadata for machine code emission. + uint8_t _barrier; + // Array of complex operand pointers. Each corresponds to zero or // more leafs. Must be set by MachNode constructor to point to an // internal array of MachOpers. 
The MachOper array is sized by diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/matcher.cpp --- a/src/hotspot/share/opto/matcher.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/matcher.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1751,6 +1751,13 @@ _shared_nodes.map(leaf->_idx, ex); } + // Have mach nodes inherit GC barrier data + if (leaf->is_LoadStore()) { + mach->set_barrier_data(leaf->as_LoadStore()->barrier_data()); + } else if (leaf->is_Mem()) { + mach->set_barrier_data(leaf->as_Mem()->barrier_data()); + } + return ex; } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/memnode.cpp --- a/src/hotspot/share/opto/memnode.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/memnode.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -49,9 +49,6 @@ #include "utilities/copy.hpp" #include "utilities/macros.hpp" #include "utilities/vmError.hpp" -#if INCLUDE_ZGC -#include "gc/z/c2/zBarrierSetC2.hpp" -#endif // Portions of code courtesy of Clifford Click @@ -2851,7 +2848,7 @@ : Node(required), _type(rt), _adr_type(at), - _has_barrier(false) + _barrier(0) { init_req(MemNode::Control, c ); init_req(MemNode::Memory , mem); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/memnode.hpp --- a/src/hotspot/share/opto/memnode.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/memnode.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -43,6 +43,8 @@ bool _unaligned_access; // Unaligned access from unsafe bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance bool _unsafe_access; // Access of unsafe origin. + uint8_t _barrier; // Bit field with barrier information + protected: #ifdef ASSERT const TypePtr* _adr_type; // What kind of memory is being addressed? @@ -62,18 +64,30 @@ unset // The memory ordering is not set (used for testing) } MemOrd; protected: - MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) - : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) { + MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) : + Node(c0,c1,c2), + _unaligned_access(false), + _mismatched_access(false), + _unsafe_access(false), + _barrier(0) { init_class_id(Class_Mem); debug_only(_adr_type=at; adr_type();) } - MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) - : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) { + MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) : + Node(c0,c1,c2,c3), + _unaligned_access(false), + _mismatched_access(false), + _unsafe_access(false), + _barrier(0) { init_class_id(Class_Mem); debug_only(_adr_type=at; adr_type();) } - MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) - : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) { + MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) : + Node(c0,c1,c2,c3,c4), + _unaligned_access(false), + _mismatched_access(false), + _unsafe_access(false), + _barrier(0) { init_class_id(Class_Mem); debug_only(_adr_type=at; adr_type();) } @@ -125,6 +139,9 @@ #endif } + uint8_t barrier_data() { return _barrier; } + void set_barrier_data(uint8_t barrier_data) { _barrier = barrier_data; } + // Search through memory states which precede this node (load or store). // Look for an exact match for the address, with no intervening // aliased stores. @@ -181,8 +198,6 @@ // this field. 
const MemOrd _mo; - uint _barrier; // Bit field with barrier information - AllocateNode* is_new_object_mark_load(PhaseGVN *phase) const; protected: @@ -196,7 +211,7 @@ public: LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency) - : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _barrier(0), _type(rt) { + : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) { init_class_id(Class_Load); } inline bool is_unordered() const { return !is_acquire(); } @@ -265,10 +280,6 @@ Node* convert_to_unsigned_load(PhaseGVN& gvn); Node* convert_to_signed_load(PhaseGVN& gvn); - void copy_barrier_info(const Node* src) { _barrier = src->as_Load()->_barrier; } - uint barrier_data() { return _barrier; } - void set_barrier_data(uint barrier_data) { _barrier |= barrier_data; } - void pin() { _control_dependency = Pinned; } bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; } @@ -820,7 +831,7 @@ private: const Type* const _type; // What kind of value is loaded? const TypePtr* _adr_type; // What kind of memory is being addressed? - bool _has_barrier; + uint8_t _barrier; // Bit field with barrier information virtual uint size_of() const; // Size is bigger public: LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required ); @@ -833,8 +844,9 @@ bool result_not_used() const; MemBarNode* trailing_membar() const; - void set_has_barrier() { _has_barrier = true; }; - bool has_barrier() const { return _has_barrier; }; + + uint8_t barrier_data() { return _barrier; } + void set_barrier_data(uint8_t barrier_data) { _barrier = barrier_data; } }; class LoadStoreConditionalNode : public LoadStoreNode { @@ -886,6 +898,7 @@ MemNode::MemOrd order() const { return _mem_ord; } + virtual uint size_of() const { return sizeof(*this); } }; class CompareAndExchangeNode : public LoadStoreNode { @@ -903,6 +916,7 @@ MemNode::MemOrd order() const { return _mem_ord; } + virtual uint size_of() const { return sizeof(*this); } }; //------------------------------CompareAndSwapBNode--------------------------- diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/node.cpp --- a/src/hotspot/share/opto/node.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/node.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -546,9 +546,6 @@ if (n->is_SafePoint()) { n->as_SafePoint()->clone_replaced_nodes(); } - if (n->is_Load()) { - n->as_Load()->copy_barrier_info(this); - } return n; // Return the clone } @@ -1473,10 +1470,6 @@ if (req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0) { return false; } - BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - if (!bs->needs_anti_dependence_check(this)) { - return false; - } return in(1)->bottom_type()->has_memory(); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/node.hpp --- a/src/hotspot/share/opto/node.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/node.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -83,8 +83,6 @@ class JumpNode; class JumpProjNode; class LoadNode; -class LoadBarrierNode; -class LoadBarrierSlowRegNode; class LoadStoreNode; class LoadStoreConditionalNode; class LockNode; @@ -642,7 +640,6 @@ DEFINE_CLASS_ID(MemBar, Multi, 3) DEFINE_CLASS_ID(Initialize, MemBar, 0) DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) - DEFINE_CLASS_ID(LoadBarrier, Multi, 4) DEFINE_CLASS_ID(Mach, Node, 1) DEFINE_CLASS_ID(MachReturn, Mach, 0) @@ -679,7 +676,6 @@ 
DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) - DEFINE_CLASS_ID(LoadBarrierSlowReg, Type, 7) DEFINE_CLASS_ID(Proj, Node, 3) DEFINE_CLASS_ID(CatchProj, Proj, 0) @@ -836,8 +832,6 @@ DEFINE_CLASS_QUERY(Load) DEFINE_CLASS_QUERY(LoadStore) DEFINE_CLASS_QUERY(LoadStoreConditional) - DEFINE_CLASS_QUERY(LoadBarrier) - DEFINE_CLASS_QUERY(LoadBarrierSlowReg) DEFINE_CLASS_QUERY(Lock) DEFINE_CLASS_QUERY(Loop) DEFINE_CLASS_QUERY(Mach) diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/output.cpp --- a/src/hotspot/share/opto/output.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/output.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -31,6 +31,8 @@ #include "compiler/compileBroker.hpp" #include "compiler/compilerDirectives.hpp" #include "compiler/oopMap.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/c2/barrierSetC2.hpp" #include "memory/allocation.inline.hpp" #include "opto/ad.hpp" #include "opto/callnode.hpp" @@ -114,35 +116,33 @@ } } + // Keeper of sizing aspects + BufferSizingData buf_sizes = BufferSizingData(); + + // Initialize code buffer + estimate_buffer_size(buf_sizes._const); + if (failing()) return; + + // Pre-compute the length of blocks and replace + // long branches with short if machine supports it. + // Must be done before ScheduleAndBundle due to SPARC delay slots uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1); blk_starts[0] = 0; - - // Initialize code buffer and process short branches. - CodeBuffer* cb = init_buffer(blk_starts); - - if (cb == NULL || failing()) { - return; - } + shorten_branches(blk_starts, buf_sizes); ScheduleAndBundle(); - -#ifndef PRODUCT - if (trace_opto_output()) { - tty->print("\n---- After ScheduleAndBundle ----\n"); - for (uint i = 0; i < _cfg->number_of_blocks(); i++) { - tty->print("\nBB#%03d:\n", i); - Block* block = _cfg->get_block(i); - for (uint j = 0; j < block->number_of_nodes(); j++) { - Node* n = block->get_node(j); - OptoReg::Name reg = _regalloc->get_reg_first(n); - tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : ""); - n->dump(); - } - } + if (failing()) { + return; } -#endif - - if (failing()) { + + // Late barrier analysis must be done after schedule and bundle + // Otherwise liveness based spilling will fail + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + bs->late_barrier_analysis(); + + // Complete sizing of codebuffer + CodeBuffer* cb = init_buffer(buf_sizes); + if (cb == NULL || failing()) { return; } @@ -223,7 +223,7 @@ // The architecture description provides short branch variants for some long // branch instructions. Replace eligible long branches with short branches. -void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) { +void Compile::shorten_branches(uint* blk_starts, BufferSizingData& buf_sizes) { // Compute size of each block, method size, and relocation information size uint nblocks = _cfg->number_of_blocks(); @@ -241,11 +241,11 @@ bool has_short_branch_candidate = false; // Initialize the sizes to 0 - code_size = 0; // Size in bytes of generated code - stub_size = 0; // Size in bytes of all stub entries + int code_size = 0; // Size in bytes of generated code + int stub_size = 0; // Size in bytes of all stub entries // Size in bytes of all relocation entries, including those in local stubs. 
// Start with 2-bytes of reloc info for the unvalidated entry point - reloc_size = 1; // Number of relocation entries + int reloc_size = 1; // Number of relocation entries // Make three passes. The first computes pessimistic blk_starts, // relative jmp_offset and reloc_size information. The second performs @@ -479,6 +479,10 @@ // a relocation index. // The CodeBuffer will expand the locs array if this estimate is too low. reloc_size *= 10 / sizeof(relocInfo); + + buf_sizes._reloc = reloc_size; + buf_sizes._code = code_size; + buf_sizes._stub = stub_size; } //------------------------------FillLocArray----------------------------------- @@ -490,8 +494,8 @@ // This should never have accepted Bad before assert(OptoReg::is_valid(regnum), "location must be valid"); return (OptoReg::is_reg(regnum)) - ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) ) - : new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum))); + ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) ) + : new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum))); } @@ -610,12 +614,12 @@ } #endif //_LP64 else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) && - OptoReg::is_reg(regnum) ) { + OptoReg::is_reg(regnum) ) { array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double() - ? Location::float_in_dbl : Location::normal )); + ? Location::float_in_dbl : Location::normal )); } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) { array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long - ? Location::int_in_long : Location::normal )); + ? Location::int_in_long : Location::normal )); } else if( t->base() == Type::NarrowOop ) { array->append(new_loc_value( _regalloc, regnum, Location::narrowoop )); } else { @@ -626,48 +630,48 @@ // No register. It must be constant data. switch (t->base()) { - case Type::Half: // Second half of a double - ShouldNotReachHere(); // Caller should skip 2nd halves - break; - case Type::AnyPtr: - array->append(new ConstantOopWriteValue(NULL)); - break; - case Type::AryPtr: - case Type::InstPtr: // fall through - array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding())); - break; - case Type::NarrowOop: - if (t == TypeNarrowOop::NULL_PTR) { + case Type::Half: // Second half of a double + ShouldNotReachHere(); // Caller should skip 2nd halves + break; + case Type::AnyPtr: array->append(new ConstantOopWriteValue(NULL)); - } else { - array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding())); + break; + case Type::AryPtr: + case Type::InstPtr: // fall through + array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding())); + break; + case Type::NarrowOop: + if (t == TypeNarrowOop::NULL_PTR) { + array->append(new ConstantOopWriteValue(NULL)); + } else { + array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding())); + } + break; + case Type::Int: + array->append(new ConstantIntValue(t->is_int()->get_con())); + break; + case Type::RawPtr: + // A return address (T_ADDRESS). + assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI"); +#ifdef _LP64 + // Must be restored to the full-width 64-bit stack slot. 
+ array->append(new ConstantLongValue(t->is_ptr()->get_con())); +#else + array->append(new ConstantIntValue(t->is_ptr()->get_con())); +#endif + break; + case Type::FloatCon: { + float f = t->is_float_constant()->getf(); + array->append(new ConstantIntValue(jint_cast(f))); + break; } - break; - case Type::Int: - array->append(new ConstantIntValue(t->is_int()->get_con())); - break; - case Type::RawPtr: - // A return address (T_ADDRESS). - assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI"); + case Type::DoubleCon: { + jdouble d = t->is_double_constant()->getd(); #ifdef _LP64 - // Must be restored to the full-width 64-bit stack slot. - array->append(new ConstantLongValue(t->is_ptr()->get_con())); + array->append(new ConstantIntValue((jint)0)); + array->append(new ConstantDoubleValue(d)); #else - array->append(new ConstantIntValue(t->is_ptr()->get_con())); -#endif - break; - case Type::FloatCon: { - float f = t->is_float_constant()->getf(); - array->append(new ConstantIntValue(jint_cast(f))); - break; - } - case Type::DoubleCon: { - jdouble d = t->is_double_constant()->getd(); -#ifdef _LP64 - array->append(new ConstantIntValue((jint)0)); - array->append(new ConstantDoubleValue(d)); -#else - // Repack the double as two jints. + // Repack the double as two jints. // The convention the interpreter uses is that the second local // holds the first raw word of the native double representation. // This is actually reasonable, since locals and stack arrays @@ -679,15 +683,15 @@ array->append(new ConstantIntValue(acc.words[1])); array->append(new ConstantIntValue(acc.words[0])); #endif - break; - } - case Type::Long: { - jlong d = t->is_long()->get_con(); + break; + } + case Type::Long: { + jlong d = t->is_long()->get_con(); #ifdef _LP64 - array->append(new ConstantIntValue((jint)0)); - array->append(new ConstantLongValue(d)); + array->append(new ConstantIntValue((jint)0)); + array->append(new ConstantLongValue(d)); #else - // Repack the long as two jints. + // Repack the long as two jints. // The convention the interpreter uses is that the second local // holds the first raw word of the native double representation. // This is actually reasonable, since locals and stack arrays @@ -699,14 +703,14 @@ array->append(new ConstantIntValue(acc.words[1])); array->append(new ConstantIntValue(acc.words[0])); #endif - break; - } - case Type::Top: // Add an illegal value here - array->append(new LocationValue(Location())); - break; - default: - ShouldNotReachHere(); - break; + break; + } + case Type::Top: // Add an illegal value here + array->append(new LocationValue(Location())); + break; + default: + ShouldNotReachHere(); + break; } } @@ -871,58 +875,58 @@ // A simplified version of Process_OopMap_Node, to handle non-safepoints. class NonSafepointEmitter { - Compile* C; - JVMState* _pending_jvms; - int _pending_offset; - - void emit_non_safepoint(); + Compile* C; + JVMState* _pending_jvms; + int _pending_offset; + + void emit_non_safepoint(); public: - NonSafepointEmitter(Compile* compile) { - this->C = compile; - _pending_jvms = NULL; - _pending_offset = 0; - } - - void observe_instruction(Node* n, int pc_offset) { - if (!C->debug_info()->recording_non_safepoints()) return; - - Node_Notes* nn = C->node_notes_at(n->_idx); - if (nn == NULL || nn->jvms() == NULL) return; - if (_pending_jvms != NULL && - _pending_jvms->same_calls_as(nn->jvms())) { - // Repeated JVMS? Stretch it up here. 
- _pending_offset = pc_offset; - } else { + NonSafepointEmitter(Compile* compile) { + this->C = compile; + _pending_jvms = NULL; + _pending_offset = 0; + } + + void observe_instruction(Node* n, int pc_offset) { + if (!C->debug_info()->recording_non_safepoints()) return; + + Node_Notes* nn = C->node_notes_at(n->_idx); + if (nn == NULL || nn->jvms() == NULL) return; if (_pending_jvms != NULL && + _pending_jvms->same_calls_as(nn->jvms())) { + // Repeated JVMS? Stretch it up here. + _pending_offset = pc_offset; + } else { + if (_pending_jvms != NULL && + _pending_offset < pc_offset) { + emit_non_safepoint(); + } + _pending_jvms = NULL; + if (pc_offset > C->debug_info()->last_pc_offset()) { + // This is the only way _pending_jvms can become non-NULL: + _pending_jvms = nn->jvms(); + _pending_offset = pc_offset; + } + } + } + + // Stay out of the way of real safepoints: + void observe_safepoint(JVMState* jvms, int pc_offset) { + if (_pending_jvms != NULL && + !_pending_jvms->same_calls_as(jvms) && _pending_offset < pc_offset) { emit_non_safepoint(); } _pending_jvms = NULL; - if (pc_offset > C->debug_info()->last_pc_offset()) { - // This is the only way _pending_jvms can become non-NULL: - _pending_jvms = nn->jvms(); - _pending_offset = pc_offset; - } } - } - - // Stay out of the way of real safepoints: - void observe_safepoint(JVMState* jvms, int pc_offset) { - if (_pending_jvms != NULL && - !_pending_jvms->same_calls_as(jvms) && - _pending_offset < pc_offset) { - emit_non_safepoint(); + + void flush_at_end() { + if (_pending_jvms != NULL) { + emit_non_safepoint(); + } + _pending_jvms = NULL; } - _pending_jvms = NULL; - } - - void flush_at_end() { - if (_pending_jvms != NULL) { - emit_non_safepoint(); - } - _pending_jvms = NULL; - } }; void NonSafepointEmitter::emit_non_safepoint() { @@ -952,15 +956,11 @@ } //------------------------------init_buffer------------------------------------ -CodeBuffer* Compile::init_buffer(uint* blk_starts) { +void Compile::estimate_buffer_size(int& const_req) { // Set the initially allocated size - int code_req = initial_code_capacity; - int locs_req = initial_locs_capacity; - int stub_req = initial_stub_capacity; - int const_req = initial_const_capacity; - - int pad_req = NativeCall::instruction_size; + const_req = initial_const_capacity; + // The extra spacing after the code is necessary on some platforms. // Sometimes we need to patch in a jump after the last instruction, // if the nmethod has been deoptimized. (See 4932387, 4894843.) @@ -972,7 +972,7 @@ // Compute prolog code size _method_size = 0; - _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize; + _frame_slots = OptoReg::reg2stack(_matcher->_old_SP) + _regalloc->_framesize; #if defined(IA64) && !defined(AIX) if (save_argument_registers()) { // 4815101: this is a stub with implicit and unknown precision fp args. @@ -1021,11 +1021,18 @@ // Initialize the space for the BufferBlob used to find and verify // instruction size in MachNode::emit_size() init_scratch_buffer_blob(const_req); - if (failing()) return NULL; // Out of memory - - // Pre-compute the length of blocks and replace - // long branches with short if machine supports it. 
- shorten_branches(blk_starts, code_req, locs_req, stub_req); +} + +CodeBuffer* Compile::init_buffer(BufferSizingData& buf_sizes) { + + int stub_req = buf_sizes._stub; + int code_req = buf_sizes._code; + int const_req = buf_sizes._const; + + int pad_req = NativeCall::instruction_size; + + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + stub_req += bs->estimate_stub_size(); // nmethod and CodeBuffer count stubs & constants as part of method's code. // class HandlerImpl is platform-specific and defined in the *.ad files. @@ -1038,18 +1045,18 @@ code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion int total_req = - const_req + - code_req + - pad_req + - stub_req + - exception_handler_req + - deopt_handler_req; // deopt handler + const_req + + code_req + + pad_req + + stub_req + + exception_handler_req + + deopt_handler_req; // deopt handler if (has_method_handle_invokes()) total_req += deopt_handler_req; // deopt MH handler CodeBuffer* cb = code_buffer(); - cb->initialize(total_req, locs_req); + cb->initialize(total_req, buf_sizes._reloc); // Have we run out of code space? if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { @@ -1268,12 +1275,12 @@ Process_OopMap_Node(mach, current_offset); } // End if safepoint - // If this is a null check, then add the start of the previous instruction to the list + // If this is a null check, then add the start of the previous instruction to the list else if( mach->is_MachNullCheck() ) { inct_starts[inct_cnt++] = previous_offset; } - // If this is a branch, then fill in the label with the target BB's label + // If this is a branch, then fill in the label with the target BB's label else if (mach->is_MachBranch()) { // This requires the TRUE branch target be in succs[0] uint block_num = block->non_connector_successor(0)->_pre_order; @@ -1284,8 +1291,8 @@ bool delay_slot_is_used = valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay(); if (!delay_slot_is_used && mach->may_be_short_branch()) { - assert(delay_slot == NULL, "not expecting delay slot node"); - int br_size = n->size(_regalloc); + assert(delay_slot == NULL, "not expecting delay slot node"); + int br_size = n->size(_regalloc); int offset = blk_starts[block_num] - current_offset; if (block_num >= i) { // Current and following block's offset are not @@ -1343,7 +1350,7 @@ } } #ifdef ASSERT - // Check that oop-store precedes the card-mark + // Check that oop-store precedes the card-mark else if (mach->ideal_Opcode() == Op_StoreCM) { uint storeCM_idx = j; int count = 0; @@ -1514,6 +1521,10 @@ } #endif + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + bs->emit_stubs(*cb); + if (failing()) return; + #ifndef PRODUCT // Information on the size of the method, without the extraneous code Scheduling::increment_method_size(cb->insts_size()); @@ -1688,20 +1699,20 @@ // Initializer for class Scheduling Scheduling::Scheduling(Arena *arena, Compile &compile) - : _arena(arena), - _cfg(compile.cfg()), - _regalloc(compile.regalloc()), - _scheduled(arena), - _available(arena), - _reg_node(arena), - _pinch_free_list(arena), - _next_node(NULL), - _bundle_instr_count(0), - _bundle_cycle_number(0), - _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]) + : _arena(arena), + _cfg(compile.cfg()), + _regalloc(compile.regalloc()), + _scheduled(arena), + _available(arena), + _reg_node(arena), + _pinch_free_list(arena), + _next_node(NULL), + _bundle_instr_count(0), + _bundle_cycle_number(0), + 
_bundle_use(0, 0, resource_count, &_bundle_use_elements[0]) #ifndef PRODUCT - , _branches(0) - , _unconditional_delays(0) + , _branches(0) + , _unconditional_delays(0) #endif { // Create a MachNopNode @@ -1782,8 +1793,8 @@ _bundle_use.reset(); memcpy(_bundle_use_elements, - Pipeline_Use::elaborated_elements, - sizeof(Pipeline_Use::elaborated_elements)); + Pipeline_Use::elaborated_elements, + sizeof(Pipeline_Use::elaborated_elements)); } // Perform instruction scheduling and bundling over the sequence of @@ -1810,6 +1821,22 @@ // Walk backwards over each basic block, computing the needed alignment // Walk over all the basic blocks scheduling.DoScheduling(); + +#ifndef PRODUCT + if (trace_opto_output()) { + tty->print("\n---- After ScheduleAndBundle ----\n"); + for (uint i = 0; i < _cfg->number_of_blocks(); i++) { + tty->print("\nBB#%03d:\n", i); + Block* block = _cfg->get_block(i); + for (uint j = 0; j < block->number_of_nodes(); j++) { + Node* n = block->get_node(j); + OptoReg::Name reg = _regalloc->get_reg_first(n); + tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : ""); + n->dump(); + } + } + } +#endif } // Compute the latency of all the instructions. This is fairly simple, @@ -1878,7 +1905,7 @@ #ifndef PRODUCT if (_cfg->C->trace_opto_output()) tty->print("# NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n", - n->_idx, _current_latency[n_idx], _bundle_cycle_number); + n->_idx, _current_latency[n_idx], _bundle_cycle_number); #endif return (false); } @@ -1895,7 +1922,7 @@ #ifndef PRODUCT if (_cfg->C->trace_opto_output()) tty->print("# NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n", - n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle); + n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle); #endif return (false); } @@ -2103,12 +2130,12 @@ // Don't allow safepoints in the branch shadow, that will // cause a number of difficulties if ( avail_pipeline->instructionCount() == 1 && - !avail_pipeline->hasMultipleBundles() && - !avail_pipeline->hasBranchDelay() && - Pipeline::instr_has_unit_size() && - d->size(_regalloc) == Pipeline::instr_unit_size() && - NodeFitsInBundle(d) && - !node_bundling(d)->used_in_delay()) { + !avail_pipeline->hasMultipleBundles() && + !avail_pipeline->hasBranchDelay() && + Pipeline::instr_has_unit_size() && + d->size(_regalloc) == Pipeline::instr_unit_size() && + NodeFitsInBundle(d) && + !node_bundling(d)->used_in_delay()) { if (d->is_Mach() && !d->is_MachSafePoint()) { // A node that fits in the delay slot was found, so we need to @@ -2153,13 +2180,13 @@ // step of the bundles if (!NodeFitsInBundle(n)) { #ifndef PRODUCT - if (_cfg->C->trace_opto_output()) - tty->print("# *** STEP(branch won't fit) ***\n"); + if (_cfg->C->trace_opto_output()) + tty->print("# *** STEP(branch won't fit) ***\n"); #endif - // Update the state information - _bundle_instr_count = 0; - _bundle_cycle_number += 1; - _bundle_use.step(1); + // Update the state information + _bundle_instr_count = 0; + _bundle_cycle_number += 1; + _bundle_use.step(1); } } @@ -2205,8 +2232,8 @@ #ifndef PRODUCT if (_cfg->C->trace_opto_output()) tty->print("# *** STEP(%d >= %d instructions) ***\n", - instruction_count + _bundle_instr_count, - Pipeline::_max_instrs_per_cycle); + instruction_count + _bundle_instr_count, + Pipeline::_max_instrs_per_cycle); #endif step(1); } @@ -2412,7 +2439,7 @@ } assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, ""); if( last->is_Catch() || - (last->is_Mach() && 
last->as_Mach()->ideal_Opcode() == Op_Halt) ) { + (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) { // There might be a prior call. Skip it. while (_bb_start < _bb_end && bb->get_node(--_bb_end)->is_MachProj()); } else if( last->is_MachNullCheck() ) { @@ -2482,7 +2509,7 @@ } #endif #ifdef ASSERT - verify_good_schedule(bb,"after block local scheduling"); + verify_good_schedule(bb,"after block local scheduling"); #endif } @@ -2830,31 +2857,31 @@ // void Scheduling::garbage_collect_pinch_nodes() { #ifndef PRODUCT - if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:"); + if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:"); #endif - int trace_cnt = 0; - for (uint k = 0; k < _reg_node.Size(); k++) { - Node* pinch = _reg_node[k]; - if ((pinch != NULL) && pinch->Opcode() == Op_Node && - // no predecence input edges - (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) { - cleanup_pinch(pinch); - _pinch_free_list.push(pinch); - _reg_node.map(k, NULL); + int trace_cnt = 0; + for (uint k = 0; k < _reg_node.Size(); k++) { + Node* pinch = _reg_node[k]; + if ((pinch != NULL) && pinch->Opcode() == Op_Node && + // no predecence input edges + (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) { + cleanup_pinch(pinch); + _pinch_free_list.push(pinch); + _reg_node.map(k, NULL); #ifndef PRODUCT - if (_cfg->C->trace_opto_output()) { - trace_cnt++; - if (trace_cnt > 40) { - tty->print("\n"); - trace_cnt = 0; - } - tty->print(" %d", pinch->_idx); + if (_cfg->C->trace_opto_output()) { + trace_cnt++; + if (trace_cnt > 40) { + tty->print("\n"); + trace_cnt = 0; } + tty->print(" %d", pinch->_idx); + } #endif - } } + } #ifndef PRODUCT - if (_cfg->C->trace_opto_output()) tty->print("\n"); + if (_cfg->C->trace_opto_output()) tty->print("\n"); #endif } @@ -2891,19 +2918,19 @@ void Scheduling::print_statistics() { // Print the size added by nops for bundling tty->print("Nops added %d bytes to total of %d bytes", - _total_nop_size, _total_method_size); + _total_nop_size, _total_method_size); if (_total_method_size > 0) tty->print(", for %.2f%%", - ((double)_total_nop_size) / ((double) _total_method_size) * 100.0); + ((double)_total_nop_size) / ((double) _total_method_size) * 100.0); tty->print("\n"); // Print the number of branch shadows filled if (Pipeline::_branch_has_delay_slot) { tty->print("Of %d branches, %d had unconditional delay slots filled", - _total_branches, _total_unconditional_delays); + _total_branches, _total_unconditional_delays); if (_total_branches > 0) tty->print(", for %.2f%%", - ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0); + ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0); tty->print("\n"); } @@ -2917,6 +2944,6 @@ if (total_bundles > 0) tty->print("Average ILP (excluding nops) is %.2f\n", - ((double)total_instructions) / ((double)total_bundles)); + ((double)total_instructions) / ((double)total_bundles)); } #endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/output.hpp --- a/src/hotspot/share/opto/output.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/output.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -40,7 +40,6 @@ class PhaseChaitin; class Pipeline_Use_Element; class Pipeline_Use; - #ifndef PRODUCT #define DEBUG_ARG(x) , x #else @@ -49,10 +48,7 @@ // Define the initial sizes for allocation of the resizable code buffer enum { - initial_code_capacity = 16 * 1024, - initial_stub_capacity = 4 * 1024, - initial_const_capacity = 4 * 
1024, - initial_locs_capacity = 3 * 1024 + initial_const_capacity = 4 * 1024 }; //------------------------------Scheduling---------------------------------- diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/opto/phaseX.cpp --- a/src/hotspot/share/opto/phaseX.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/opto/phaseX.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1648,14 +1648,14 @@ // of the mirror load depends on the type of 'n'. See LoadNode::Value(). // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror)))) BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - bool has_load_barriers = bs->has_load_barriers(); + bool has_load_barrier_nodes = bs->has_load_barrier_nodes(); if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) { for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { Node* u = use->fast_out(i2); const Type* ut = u->bottom_type(); if (u->Opcode() == Op_LoadP && ut->isa_instptr()) { - if (has_load_barriers) { + if (has_load_barrier_nodes) { // Search for load barriers behind the load for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) { Node* b = u->fast_out(i3); @@ -1808,14 +1808,14 @@ // Loading the java mirror from a Klass requires two loads and the type // of the mirror load depends on the type of 'n'. See LoadNode::Value(). BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - bool has_load_barriers = bs->has_load_barriers(); + bool has_load_barrier_nodes = bs->has_load_barrier_nodes(); if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) { for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) { Node* u = m->fast_out(i2); const Type* ut = u->bottom_type(); if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) { - if (has_load_barriers) { + if (has_load_barrier_nodes) { // Search for load barriers behind the load for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) { Node* b = u->fast_out(i3); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jni.cpp --- a/src/hotspot/share/prims/jni.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jni.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -65,7 +65,6 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/atomic.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -88,6 +87,9 @@ #include "utilities/histogram.hpp" #include "utilities/macros.hpp" #include "utilities/vmError.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmciCompiler.hpp" +#endif static jint CurrentVersion = JNI_VERSION_10; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jniCheck.cpp --- a/src/hotspot/share/prims/jniCheck.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jniCheck.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -534,10 +534,10 @@ if (obj != NULL) { oop recv = jniCheck::validate_object(thr, obj); assert(recv != NULL, "validate_object checks that"); - Klass* ik = recv->klass(); + Klass* rk = recv->klass(); // Check that the object is a subtype of method holder too. 
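A worked example of the ratios printed by Scheduling::print_statistics() in the opto/output.cpp hunk above. The sample counters here are invented, not measured; the formulas are the ones quoted in that hunk:

#include <cstdio>

int main() {
  int total_nop_size = 1200, total_method_size = 48000;
  int total_branches = 900, total_unconditional_delays = 300;
  int total_instructions = 20000, total_bundles = 8000;
  // Nop padding overhead, as in print_statistics():
  std::printf("Nops added %d bytes to total of %d bytes, for %.2f%%\n",
              total_nop_size, total_method_size,
              (double)total_nop_size / (double)total_method_size * 100.0);          // 2.50%
  // Share of branches whose unconditional delay slot was filled:
  std::printf("Of %d branches, %d had unconditional delay slots filled, for %.2f%%\n",
              total_branches, total_unconditional_delays,
              (double)total_unconditional_delays / (double)total_branches * 100.0); // 33.33%
  // Average instruction-level parallelism per bundle, excluding nops:
  std::printf("Average ILP (excluding nops) is %.2f\n",
              (double)total_instructions / (double)total_bundles);                  // 2.50
}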
- if (!InstanceKlass::cast(ik)->is_subtype_of(holder)) { + if (!rk->is_subtype_of(holder)) { ReportJNIFatalError(thr, fatal_wrong_class_or_method); } } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiEnv.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -3229,23 +3229,23 @@ jvmtiError JvmtiEnv::DestroyRawMonitor(JvmtiRawMonitor * rmonitor) { if (Threads::number_of_threads() == 0) { - // Remove this monitor from pending raw monitors list + // Remove this monitor from pending raw monitors list // if it has entered in onload or start phase. JvmtiPendingMonitors::destroy(rmonitor); } else { Thread* thread = Thread::current(); - if (rmonitor->is_entered(thread)) { + if (rmonitor->owner() == thread) { // The caller owns this monitor which we are about to destroy. // We exit the underlying synchronization object so that the // "delete monitor" call below can work without an assertion // failure on systems that don't like destroying synchronization // objects that are locked. int r; - intptr_t recursion = rmonitor->recursions(); - for (intptr_t i = 0; i <= recursion; i++) { + int recursion = rmonitor->recursions(); + for (int i = 0; i <= recursion; i++) { r = rmonitor->raw_exit(thread); - assert(r == ObjectMonitor::OM_OK, "raw_exit should have worked"); - if (r != ObjectMonitor::OM_OK) { // robustness + assert(r == JvmtiRawMonitor::M_OK, "raw_exit should have worked"); + if (r != JvmtiRawMonitor::M_OK) { // robustness return JVMTI_ERROR_INTERNAL; } } @@ -3271,7 +3271,7 @@ jvmtiError JvmtiEnv::RawMonitorEnter(JvmtiRawMonitor * rmonitor) { if (Threads::number_of_threads() == 0) { - // No JavaThreads exist so ObjectMonitor enter cannot be + // No JavaThreads exist so JvmtiRawMonitor enter cannot be // used, add this raw monitor to the pending list. // The pending monitors will be actually entered when // the VM is setup. @@ -3279,20 +3279,10 @@ // in thread.cpp. JvmtiPendingMonitors::enter(rmonitor); } else { - int r = 0; Thread* thread = Thread::current(); - if (thread->is_Java_thread()) { JavaThread* current_thread = (JavaThread*)thread; -#ifdef PROPER_TRANSITIONS - // Not really unknown but ThreadInVMfromNative does more than we want - ThreadInVMfromUnknown __tiv; - { - ThreadBlockInVM __tbivm(current_thread); - r = rmonitor->raw_enter(current_thread); - } -#else /* Transition to thread_blocked without entering vm state */ /* This is really evil. 
Normally you can't undo _thread_blocked */ /* transitions like this because it would cause us to miss a */ @@ -3308,22 +3298,11 @@ current_thread->frame_anchor()->walkable(), "Must be walkable"); current_thread->set_thread_state(_thread_blocked); - r = rmonitor->raw_enter(current_thread); + rmonitor->raw_enter(current_thread); // restore state, still at a safepoint safe state current_thread->set_thread_state(state); - -#endif /* PROPER_TRANSITIONS */ - assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked"); } else { - if (thread->is_Named_thread()) { - r = rmonitor->raw_enter(thread); - } else { - ShouldNotReachHere(); - } - } - - if (r != ObjectMonitor::OM_OK) { // robustness - return JVMTI_ERROR_INTERNAL; + rmonitor->raw_enter(thread); } } return JVMTI_ERROR_NONE; @@ -3342,31 +3321,10 @@ err = JVMTI_ERROR_NOT_MONITOR_OWNER; } } else { - int r = 0; Thread* thread = Thread::current(); - - if (thread->is_Java_thread()) { - JavaThread* current_thread = (JavaThread*)thread; -#ifdef PROPER_TRANSITIONS - // Not really unknown but ThreadInVMfromNative does more than we want - ThreadInVMfromUnknown __tiv; -#endif /* PROPER_TRANSITIONS */ - r = rmonitor->raw_exit(current_thread); - } else { - if (thread->is_Named_thread()) { - r = rmonitor->raw_exit(thread); - } else { - ShouldNotReachHere(); - } - } - - if (r == ObjectMonitor::OM_ILLEGAL_MONITOR_STATE) { + int r = rmonitor->raw_exit(thread); + if (r == JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE) { err = JVMTI_ERROR_NOT_MONITOR_OWNER; - } else { - assert(r == ObjectMonitor::OM_OK, "raw_exit should have worked"); - if (r != ObjectMonitor::OM_OK) { // robustness - err = JVMTI_ERROR_INTERNAL; - } } } return err; @@ -3381,14 +3339,7 @@ if (thread->is_Java_thread()) { JavaThread* current_thread = (JavaThread*)thread; -#ifdef PROPER_TRANSITIONS - // Not really unknown but ThreadInVMfromNative does more than we want - ThreadInVMfromUnknown __tiv; - { - ThreadBlockInVM __tbivm(current_thread); - r = rmonitor->raw_wait(millis, true, current_thread); - } -#else + /* Transition to thread_blocked without entering vm state */ /* This is really evil. 
Normally you can't undo _thread_blocked */ /* transitions like this because it would cause us to miss a */ @@ -3408,57 +3359,31 @@ // restore state, still at a safepoint safe state current_thread->set_thread_state(state); -#endif /* PROPER_TRANSITIONS */ } else { - if (thread->is_Named_thread()) { r = rmonitor->raw_wait(millis, false, thread); - } else { - ShouldNotReachHere(); - } + assert(r != JvmtiRawMonitor::M_INTERRUPTED, "non-JavaThread can't be interrupted"); } switch (r) { - case ObjectMonitor::OM_INTERRUPTED: + case JvmtiRawMonitor::M_INTERRUPTED: return JVMTI_ERROR_INTERRUPT; - case ObjectMonitor::OM_ILLEGAL_MONITOR_STATE: + case JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE: return JVMTI_ERROR_NOT_MONITOR_OWNER; + default: + return JVMTI_ERROR_NONE; } - assert(r == ObjectMonitor::OM_OK, "raw_wait should have worked"); - if (r != ObjectMonitor::OM_OK) { // robustness - return JVMTI_ERROR_INTERNAL; - } - - return JVMTI_ERROR_NONE; } /* end RawMonitorWait */ // rmonitor - pre-checked for validity jvmtiError JvmtiEnv::RawMonitorNotify(JvmtiRawMonitor * rmonitor) { - int r = 0; Thread* thread = Thread::current(); - - if (thread->is_Java_thread()) { - JavaThread* current_thread = (JavaThread*)thread; - // Not really unknown but ThreadInVMfromNative does more than we want - ThreadInVMfromUnknown __tiv; - r = rmonitor->raw_notify(current_thread); - } else { - if (thread->is_Named_thread()) { - r = rmonitor->raw_notify(thread); - } else { - ShouldNotReachHere(); - } - } - - if (r == ObjectMonitor::OM_ILLEGAL_MONITOR_STATE) { + int r = rmonitor->raw_notify(thread); + + if (r == JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE) { return JVMTI_ERROR_NOT_MONITOR_OWNER; } - assert(r == ObjectMonitor::OM_OK, "raw_notify should have worked"); - if (r != ObjectMonitor::OM_OK) { // robustness - return JVMTI_ERROR_INTERNAL; - } - return JVMTI_ERROR_NONE; } /* end RawMonitorNotify */ @@ -3466,29 +3391,12 @@ // rmonitor - pre-checked for validity jvmtiError JvmtiEnv::RawMonitorNotifyAll(JvmtiRawMonitor * rmonitor) { - int r = 0; Thread* thread = Thread::current(); - - if (thread->is_Java_thread()) { - JavaThread* current_thread = (JavaThread*)thread; - ThreadInVMfromUnknown __tiv; - r = rmonitor->raw_notifyAll(current_thread); - } else { - if (thread->is_Named_thread()) { - r = rmonitor->raw_notifyAll(thread); - } else { - ShouldNotReachHere(); - } - } - - if (r == ObjectMonitor::OM_ILLEGAL_MONITOR_STATE) { + int r = rmonitor->raw_notifyAll(thread); + + if (r == JvmtiRawMonitor::M_ILLEGAL_MONITOR_STATE) { return JVMTI_ERROR_NOT_MONITOR_OWNER; } - assert(r == ObjectMonitor::OM_OK, "raw_notifyAll should have worked"); - if (r != ObjectMonitor::OM_OK) { // robustness - return JVMTI_ERROR_INTERNAL; - } - return JVMTI_ERROR_NONE; } /* end RawMonitorNotifyAll */ diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiEnvBase.cpp --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -659,10 +659,9 @@ // thread is not doing an Object.wait() call mon = java_thread->current_pending_monitor(); if (mon != NULL) { - // The thread is trying to enter() or raw_enter() an ObjectMonitor. + // The thread is trying to enter() an ObjectMonitor. 
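For reference, this is how the return-code mapping in RawMonitorWait above surfaces to an agent: M_INTERRUPTED becomes JVMTI_ERROR_INTERRUPT and M_ILLEGAL_MONITOR_STATE becomes JVMTI_ERROR_NOT_MONITOR_OWNER. A sketch against the standard JVMTI API; g_monitor and g_ready are hypothetical agent state, and the predicate loop is needed because raw_wait() may return spuriously:

#include <jvmti.h>

static jrawMonitorID g_monitor;   // created earlier with CreateRawMonitor
static volatile bool g_ready;     // only written while holding g_monitor

void wait_until_ready(jvmtiEnv* jvmti) {
  jvmti->RawMonitorEnter(g_monitor);
  while (!g_ready) {              // re-check: spurious returns are allowed
    jvmtiError err = jvmti->RawMonitorWait(g_monitor, 0 /* no timeout */);
    if (err == JVMTI_ERROR_INTERRUPT) {
      break;                      // interrupted; the monitor is still owned
    }
    // JVMTI_ERROR_NOT_MONITOR_OWNER cannot happen here: we entered above.
  }
  jvmti->RawMonitorExit(g_monitor);
}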
obj = (oop)mon->object(); - // If obj == NULL, then ObjectMonitor is raw which doesn't count - // as contended for this API + assert(obj != NULL, "ObjectMonitor should have a valid object!"); } // implied else: no contended ObjectMonitor } else { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiExport.cpp --- a/src/hotspot/share/prims/jvmtiExport.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiExport.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1202,6 +1202,7 @@ bool JvmtiExport::_can_post_method_exit = false; bool JvmtiExport::_can_pop_frame = false; bool JvmtiExport::_can_force_early_return = false; +bool JvmtiExport::_can_get_owned_monitor_info = false; bool JvmtiExport::_early_vmstart_recorded = false; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiExport.hpp --- a/src/hotspot/share/prims/jvmtiExport.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiExport.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -91,6 +91,7 @@ JVMTI_SUPPORT_FLAG(can_force_early_return) JVMTI_SUPPORT_FLAG(early_vmstart_recorded) + JVMTI_SUPPORT_FLAG(can_get_owned_monitor_info) // includes can_get_owned_monitor_stack_depth_info friend class JvmtiEventControllerPrivate; // should only modify these flags JVMTI_SUPPORT_FLAG(should_post_single_step) diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiManageCapabilities.cpp --- a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -367,6 +367,8 @@ JvmtiExport::set_can_pop_frame(avail.can_pop_frame); JvmtiExport::set_can_force_early_return(avail.can_force_early_return); JvmtiExport::set_should_clean_up_heap_objects(avail.can_generate_breakpoint_events); + JvmtiExport::set_can_get_owned_monitor_info(avail.can_get_owned_monitor_info || + avail.can_get_owned_monitor_stack_depth_info); } #ifndef PRODUCT diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiRawMonitor.cpp --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -30,21 +30,23 @@ #include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" -GrowableArray *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(1,true); +JvmtiRawMonitor::QNode::QNode(Thread* thread) : _next(NULL), _prev(NULL), + _event(thread->_ParkEvent), + _notified(0), _t_state(TS_RUN) { +} + +GrowableArray* JvmtiPendingMonitors::_monitors = + new (ResourceObj::C_HEAP, mtInternal) GrowableArray(1, true); void JvmtiPendingMonitors::transition_raw_monitors() { assert((Threads::number_of_threads()==1), - "Java thread has not created yet or more than one java thread \ -is running. Raw monitor transition will not work"); - JavaThread *current_java_thread = JavaThread::current(); + "Java thread has not been created yet or more than one java thread " + "is running. 
Raw monitor transition will not work"); + JavaThread* current_java_thread = JavaThread::current(); assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm"); - { - ThreadBlockInVM __tbivm(current_java_thread); - for(int i=0; i< count(); i++) { - JvmtiRawMonitor *rmonitor = monitors()->at(i); - int r = rmonitor->raw_enter(current_java_thread); - assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked"); - } + for (int i = 0; i < count(); i++) { + JvmtiRawMonitor* rmonitor = monitors()->at(i); + rmonitor->raw_enter(current_java_thread); } // pending monitors are converted to real monitor so delete them all. dispose(); @@ -54,13 +56,16 @@ // class JvmtiRawMonitor // -JvmtiRawMonitor::JvmtiRawMonitor(const char *name) { +JvmtiRawMonitor::JvmtiRawMonitor(const char* name) : _owner(NULL), + _recursions(0), + _entry_list(NULL), + _wait_set(NULL), + _waiters(0), + _magic(JVMTI_RM_MAGIC), + _name(NULL) { #ifdef ASSERT _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal), name); -#else - _name = NULL; #endif - _magic = JVMTI_RM_MAGIC; } JvmtiRawMonitor::~JvmtiRawMonitor() { @@ -100,181 +105,179 @@ } // ------------------------------------------------------------------------- -// The raw monitor subsystem is entirely distinct from normal -// java-synchronization or jni-synchronization. raw monitors are not +// The JVMTI raw monitor subsystem is entirely distinct from normal +// java-synchronization or jni-synchronization. JVMTI raw monitors are not // associated with objects. They can be implemented in any manner // that makes sense. The original implementors decided to piggy-back -// the raw-monitor implementation on the existing Java objectMonitor mechanism. -// This flaw needs to fixed. We should reimplement raw monitors as sui-generis. -// Specifically, we should not implement raw monitors via java monitors. -// Time permitting, we should disentangle and deconvolve the two implementations -// and move the resulting raw monitor implementation over to the JVMTI directories. -// Ideally, the raw monitor implementation would be built on top of -// park-unpark and nothing else. -// -// raw monitors are used mainly by JVMTI -// The raw monitor implementation borrows the ObjectMonitor structure, -// but the operators are degenerate and extremely simple. -// -// Mixed use of a single objectMonitor instance -- as both a raw monitor -// and a normal java monitor -- is not permissible. +// the raw-monitor implementation on the existing Java ObjectMonitor mechanism. +// Now we just use a simplified form of that ObjectMonitor code. // // Note that we use the single RawMonitor_lock to protect queue operations for // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage -// is deprecated and rare, this is not of concern. The RawMonitor_lock can not +// is fairly rare, this is not of concern. The RawMonitor_lock can not // be held indefinitely. The critical sections must be short and bounded. 
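The ownership protocol that simple_enter()/simple_exit() implement below reduces to a CAS on _owner plus a queue of parked losers. A standalone toy model, with std::atomic standing in for Atomic::replace_if_null and a yield loop standing in for enqueue-and-park, so this is a sketch rather than HotSpot code:

#include <atomic>
#include <thread>

struct ToyRawMonitor {
  std::atomic<std::thread::id> _owner{};
  void enter() {
    std::thread::id no_owner{};
    // Install ourselves iff the monitor is unowned (replace_if_null).
    while (!_owner.compare_exchange_weak(no_owner, std::this_thread::get_id())) {
      no_owner = std::thread::id{};  // expected value was clobbered; reset it
      std::this_thread::yield();     // the real code enqueues a QNode and parks
    }
  }
  void exit() {
    _owner.store(std::thread::id{}); // release; the real code unparks one waiter
  }
};

The real code parks instead of spinning because raw monitors can be held across long waits; the _entry_list queue exists so exit() can hand the monitor to one blocked thread without waking all of them.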
// // ------------------------------------------------------------------------- -int JvmtiRawMonitor::SimpleEnter (Thread * Self) { +void JvmtiRawMonitor::simple_enter(Thread* self) { for (;;) { - if (Atomic::replace_if_null(Self, &_owner)) { - return OS_OK ; + if (Atomic::replace_if_null(self, &_owner)) { + return; } - ObjectWaiter Node (Self) ; - Self->_ParkEvent->reset() ; // strictly optional - Node.TState = ObjectWaiter::TS_ENTER ; + QNode node(self); + self->_ParkEvent->reset(); // strictly optional + node._t_state = QNode::TS_ENTER; - RawMonitor_lock->lock_without_safepoint_check() ; - Node._next = _EntryList ; - _EntryList = &Node ; - OrderAccess::fence() ; - if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) { - _EntryList = Node._next ; - RawMonitor_lock->unlock() ; - return OS_OK ; + RawMonitor_lock->lock_without_safepoint_check(); + node._next = _entry_list; + _entry_list = &node; + OrderAccess::fence(); + if (_owner == NULL && Atomic::replace_if_null(self, &_owner)) { + _entry_list = node._next; + RawMonitor_lock->unlock(); + return; } - RawMonitor_lock->unlock() ; - while (Node.TState == ObjectWaiter::TS_ENTER) { - Self->_ParkEvent->park() ; + RawMonitor_lock->unlock(); + while (node._t_state == QNode::TS_ENTER) { + self->_ParkEvent->park(); } } } -int JvmtiRawMonitor::SimpleExit (Thread * Self) { - guarantee (_owner == Self, "invariant") ; - OrderAccess::release_store(&_owner, (void*)NULL) ; - OrderAccess::fence() ; - if (_EntryList == NULL) return OS_OK ; - ObjectWaiter * w ; +void JvmtiRawMonitor::simple_exit(Thread* self) { + guarantee(_owner == self, "invariant"); + OrderAccess::release_store(&_owner, (Thread*)NULL); + OrderAccess::fence(); + if (_entry_list == NULL) { + return; + } - RawMonitor_lock->lock_without_safepoint_check() ; - w = _EntryList ; + RawMonitor_lock->lock_without_safepoint_check(); + QNode* w = _entry_list; if (w != NULL) { - _EntryList = w->_next ; + _entry_list = w->_next; } - RawMonitor_lock->unlock() ; + RawMonitor_lock->unlock(); if (w != NULL) { - guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ; - // Once we set TState to TS_RUN the waiting thread can complete - // SimpleEnter and 'w' is pointing into random stack space. So we have - // to ensure we extract the ParkEvent (which is in type-stable memory) - // before we set the state, and then don't access 'w'. - ParkEvent * ev = w->_event ; - OrderAccess::loadstore(); - w->TState = ObjectWaiter::TS_RUN ; - OrderAccess::fence() ; - ev->unpark() ; + guarantee(w ->_t_state == QNode::TS_ENTER, "invariant"); + // Once we set _t_state to TS_RUN the waiting thread can complete + // simple_enter and 'w' is pointing into random stack space. So we have + // to ensure we extract the ParkEvent (which is in type-stable memory) + // before we set the state, and then don't access 'w'. 
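Restating the lifetime hazard the comment above guards against, with stand-in types: the queue node lives on the waiter's stack, so the moment _t_state becomes TS_RUN the waiter may complete simple_enter(), return, and pop that frame. Only the ParkEvent, which HotSpot keeps in type-stable memory, is safe to touch afterwards. A sketch:

#include <atomic>

struct ToyEvent { void unpark() { /* wake the parked thread */ } };
enum ToyState { TS_ENTER, TS_RUN };
struct ToyQNode {                  // stack-allocated by the waiting thread
  ToyEvent* _event;
  std::atomic<ToyState> _t_state;
};

void wake(ToyQNode* w) {
  ToyEvent* ev = w->_event;        // 1. copy out everything needed from *w
  w->_t_state.store(TS_RUN);       // 2. after this store, *w may be destroyed
  ev->unpark();                    // 3. still safe: ev outlives the node
}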
+ ParkEvent* ev = w->_event; + OrderAccess::loadstore(); + w->_t_state = QNode::TS_RUN; + OrderAccess::fence(); + ev->unpark(); } - return OS_OK ; + return; } -int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) { - guarantee (_owner == Self , "invariant") ; - guarantee (_recursions == 0, "invariant") ; +int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) { + guarantee(_owner == self , "invariant"); + guarantee(_recursions == 0, "invariant"); - ObjectWaiter Node (Self) ; - Node._notified = 0 ; - Node.TState = ObjectWaiter::TS_WAIT ; + QNode node(self); + node._notified = 0; + node._t_state = QNode::TS_WAIT; - RawMonitor_lock->lock_without_safepoint_check() ; - Node._next = _WaitSet ; - _WaitSet = &Node ; - RawMonitor_lock->unlock() ; + RawMonitor_lock->lock_without_safepoint_check(); + node._next = _wait_set; + _wait_set = &node; + RawMonitor_lock->unlock(); - SimpleExit (Self) ; - guarantee (_owner != Self, "invariant") ; + simple_exit(self); + guarantee(_owner != self, "invariant"); - int ret = OS_OK ; + int ret = OS_OK; if (millis <= 0) { - Self->_ParkEvent->park(); + self->_ParkEvent->park(); } else { - ret = Self->_ParkEvent->park(millis); + ret = self->_ParkEvent->park(millis); } // If thread still resides on the waitset then unlink it. // Double-checked locking -- the usage is safe in this context - // as TState is volatile and the lock-unlock operators are + // as _t_state is volatile and the lock-unlock operators are // serializing (barrier-equivalent). - if (Node.TState == ObjectWaiter::TS_WAIT) { - RawMonitor_lock->lock_without_safepoint_check() ; - if (Node.TState == ObjectWaiter::TS_WAIT) { + if (node._t_state == QNode::TS_WAIT) { + RawMonitor_lock->lock_without_safepoint_check(); + if (node._t_state == QNode::TS_WAIT) { // Simple O(n) unlink, but performance isn't critical here. - ObjectWaiter * p ; - ObjectWaiter * q = NULL ; - for (p = _WaitSet ; p != &Node; p = p->_next) { - q = p ; + QNode* p; + QNode* q = NULL; + for (p = _wait_set; p != &node; p = p->_next) { + q = p; } - guarantee (p == &Node, "invariant") ; + guarantee(p == &node, "invariant"); if (q == NULL) { - guarantee (p == _WaitSet, "invariant") ; - _WaitSet = p->_next ; + guarantee (p == _wait_set, "invariant"); + _wait_set = p->_next; } else { - guarantee (p == q->_next, "invariant") ; - q->_next = p->_next ; + guarantee(p == q->_next, "invariant"); + q->_next = p->_next; } - Node.TState = ObjectWaiter::TS_RUN ; + node._t_state = QNode::TS_RUN; } - RawMonitor_lock->unlock() ; + RawMonitor_lock->unlock(); } - guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ; - SimpleEnter (Self) ; + guarantee(node._t_state == QNode::TS_RUN, "invariant"); + simple_enter(self); - guarantee (_owner == Self, "invariant") ; - guarantee (_recursions == 0, "invariant") ; - return ret ; + guarantee(_owner == self, "invariant"); + guarantee(_recursions == 0, "invariant"); + return ret; } -int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) { - guarantee (_owner == Self, "invariant") ; - if (_WaitSet == NULL) return OS_OK ; +void JvmtiRawMonitor::simple_notify(Thread* self, bool all) { + guarantee(_owner == self, "invariant"); + if (_wait_set == NULL) { + return; + } // We have two options: - // A. Transfer the threads from the WaitSet to the EntryList - // B. Remove the thread from the WaitSet and unpark() it. + // A. Transfer the threads from the _wait_set to the _entry_list + // B. Remove the thread from the _wait_set and unpark() it. 
// // We use (B), which is crude and results in lots of futile // context switching. In particular (B) induces lots of contention. - ParkEvent * ev = NULL ; // consider using a small auto array ... - RawMonitor_lock->lock_without_safepoint_check() ; + ParkEvent* ev = NULL; // consider using a small auto array ... + RawMonitor_lock->lock_without_safepoint_check(); for (;;) { - ObjectWaiter * w = _WaitSet ; - if (w == NULL) break ; - _WaitSet = w->_next ; - if (ev != NULL) { ev->unpark(); ev = NULL; } - ev = w->_event ; - OrderAccess::loadstore() ; - w->TState = ObjectWaiter::TS_RUN ; - OrderAccess::storeload(); - if (!All) break ; + QNode* w = _wait_set; + if (w == NULL) break; + _wait_set = w->_next; + if (ev != NULL) { + ev->unpark(); + ev = NULL; + } + ev = w->_event; + OrderAccess::loadstore(); + w->_t_state = QNode::TS_RUN; + OrderAccess::storeload(); + if (!all) { + break; + } } - RawMonitor_lock->unlock() ; - if (ev != NULL) ev->unpark(); - return OS_OK ; + RawMonitor_lock->unlock(); + if (ev != NULL) { + ev->unpark(); + } + return; } // Any JavaThread will enter here with state _thread_blocked -int JvmtiRawMonitor::raw_enter(TRAPS) { - void * Contended ; - +void JvmtiRawMonitor::raw_enter(Thread* self) { + void* contended; + JavaThread* jt = NULL; // don't enter raw monitor if thread is being externally suspended, it will // surprise the suspender if a "suspended" thread can still enter monitor - JavaThread * jt = (JavaThread *)THREAD; - if (THREAD->is_Java_thread()) { + if (self->is_Java_thread()) { + jt = (JavaThread*)self; jt->SR_lock()->lock_without_safepoint_check(); while (jt->is_external_suspend()) { jt->SR_lock()->unlock(); @@ -282,150 +285,140 @@ jt->SR_lock()->lock_without_safepoint_check(); } // guarded by SR_lock to avoid racing with new external suspend requests. - Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL); + contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL); jt->SR_lock()->unlock(); } else { - Contended = Atomic::cmpxchg(THREAD, &_owner, (void*)NULL); + contended = Atomic::cmpxchg(self, &_owner, (Thread*)NULL); } - if (Contended == THREAD) { - _recursions ++ ; - return OM_OK ; + if (contended == self) { + _recursions++; + return; } - if (Contended == NULL) { - guarantee (_owner == THREAD, "invariant") ; - guarantee (_recursions == 0, "invariant") ; - return OM_OK ; + if (contended == NULL) { + guarantee(_owner == self, "invariant"); + guarantee(_recursions == 0, "invariant"); + return; } - THREAD->set_current_pending_monitor(this); - - if (!THREAD->is_Java_thread()) { - // No other non-Java threads besides VM thread would acquire - // a raw monitor. - assert(THREAD->is_VM_thread(), "must be VM thread"); - SimpleEnter (THREAD) ; - } else { - guarantee (jt->thread_state() == _thread_blocked, "invariant") ; - for (;;) { - jt->set_suspend_equivalent(); - // cleared by handle_special_suspend_equivalent_condition() or - // java_suspend_self() - SimpleEnter (THREAD) ; - - // were we externally suspended while we were waiting? - if (!jt->handle_special_suspend_equivalent_condition()) break ; + self->set_current_pending_raw_monitor(this); - // This thread was externally suspended - // - // This logic isn't needed for JVMTI raw monitors, - // but doesn't hurt just in case the suspend rules change. This - // logic is needed for the JvmtiRawMonitor.wait() reentry phase. - // We have reentered the contended monitor, but while we were - // waiting another thread suspended us. 
We don't want to reenter - // the monitor while suspended because that would surprise the - // thread that suspended us. - // - // Drop the lock - - SimpleExit (THREAD) ; - - jt->java_suspend_self(); - } - - assert(_owner == THREAD, "Fatal error with monitor owner!"); - assert(_recursions == 0, "Fatal error with monitor recursions!"); - } + if (!self->is_Java_thread()) { + simple_enter(self); + } else { + guarantee(jt->thread_state() == _thread_blocked, "invariant"); + for (;;) { + jt->set_suspend_equivalent(); + // cleared by handle_special_suspend_equivalent_condition() or + // java_suspend_self() + simple_enter(jt); - THREAD->set_current_pending_monitor(NULL); - guarantee (_recursions == 0, "invariant") ; - return OM_OK; -} - -// Used mainly for JVMTI raw monitor implementation -// Also used for JvmtiRawMonitor::wait(). -int JvmtiRawMonitor::raw_exit(TRAPS) { - if (THREAD != _owner) { - return OM_ILLEGAL_MONITOR_STATE; - } - if (_recursions > 0) { - --_recursions ; - return OM_OK ; - } - - void * List = _EntryList ; - SimpleExit (THREAD) ; + // were we externally suspended while we were waiting? + if (!jt->handle_special_suspend_equivalent_condition()) { + break; + } - return OM_OK; -} - -// Used for JVMTI raw monitor implementation. -// All JavaThreads will enter here with state _thread_blocked - -int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) { - if (THREAD != _owner) { - return OM_ILLEGAL_MONITOR_STATE; - } + // This thread was externally suspended + // We have reentered the contended monitor, but while we were + // waiting another thread suspended us. We don't want to reenter + // the monitor while suspended because that would surprise the + // thread that suspended us. + // + // Drop the lock + simple_exit(jt); - // To avoid spurious wakeups we reset the parkevent -- This is strictly optional. - // The caller must be able to tolerate spurious returns from raw_wait(). 
- THREAD->_ParkEvent->reset() ; - OrderAccess::fence() ; - - // check interrupt event - if (interruptible) { - assert(THREAD->is_Java_thread(), "Only JavaThreads can be interruptible"); - JavaThread* jt = (JavaThread*) THREAD; - if (jt->is_interrupted(true)) { - return OM_INTERRUPTED; + jt->java_suspend_self(); } } - intptr_t save = _recursions ; - _recursions = 0 ; - _waiters ++ ; - if (THREAD->is_Java_thread()) { - guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ; - ((JavaThread *)THREAD)->set_suspend_equivalent(); - } - int rv = SimpleWait (THREAD, millis) ; - _recursions = save ; - _waiters -- ; + self->set_current_pending_raw_monitor(NULL); + + guarantee(_owner == self, "invariant"); + guarantee(_recursions == 0, "invariant"); +} - guarantee (THREAD == _owner, "invariant") ; - if (THREAD->is_Java_thread()) { - JavaThread * jSelf = (JavaThread *) THREAD ; - for (;;) { - if (!jSelf->handle_special_suspend_equivalent_condition()) break ; - SimpleExit (THREAD) ; - jSelf->java_suspend_self(); - SimpleEnter (THREAD) ; - jSelf->set_suspend_equivalent() ; - } +int JvmtiRawMonitor::raw_exit(Thread* self) { + if (self != _owner) { + return M_ILLEGAL_MONITOR_STATE; } - guarantee (THREAD == _owner, "invariant") ; + if (_recursions > 0) { + _recursions--; + } else { + simple_exit(self); + } - if (interruptible) { - JavaThread* jt = (JavaThread*) THREAD; - if (jt->is_interrupted(true)) { - return OM_INTERRUPTED; - } - } - return OM_OK ; + return M_OK; } -int JvmtiRawMonitor::raw_notify(TRAPS) { - if (THREAD != _owner) { - return OM_ILLEGAL_MONITOR_STATE; +// All JavaThreads will enter here with state _thread_blocked + +int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread* self) { + if (self != _owner) { + return M_ILLEGAL_MONITOR_STATE; + } + + // To avoid spurious wakeups we reset the parkevent. This is strictly optional. + // The caller must be able to tolerate spurious returns from raw_wait(). 
+ self->_ParkEvent->reset(); + OrderAccess::fence(); + + JavaThread* jt = NULL; + // check interrupt event + if (interruptible) { + assert(self->is_Java_thread(), "Only JavaThreads can be interruptible"); + jt = (JavaThread*)self; + if (jt->is_interrupted(true)) { + return M_INTERRUPTED; + } + } else { + assert(!self->is_Java_thread(), "JavaThreads must be interuptible"); } - SimpleNotify (THREAD, false) ; - return OM_OK; + + intptr_t save = _recursions; + _recursions = 0; + _waiters++; + if (self->is_Java_thread()) { + guarantee(jt->thread_state() == _thread_blocked, "invariant"); + jt->set_suspend_equivalent(); + } + int rv = simple_wait(self, millis); + _recursions = save; + _waiters--; + + guarantee(self == _owner, "invariant"); + if (self->is_Java_thread()) { + for (;;) { + if (!jt->handle_special_suspend_equivalent_condition()) { + break; + } + simple_exit(jt); + jt->java_suspend_self(); + simple_enter(jt); + jt->set_suspend_equivalent(); + } + guarantee(jt == _owner, "invariant"); + } + + if (interruptible && jt->is_interrupted(true)) { + return M_INTERRUPTED; + } + + return M_OK; } -int JvmtiRawMonitor::raw_notifyAll(TRAPS) { - if (THREAD != _owner) { - return OM_ILLEGAL_MONITOR_STATE; +int JvmtiRawMonitor::raw_notify(Thread* self) { + if (self != _owner) { + return M_ILLEGAL_MONITOR_STATE; } - SimpleNotify (THREAD, true) ; - return OM_OK; + simple_notify(self, false); + return M_OK; } + +int JvmtiRawMonitor::raw_notifyAll(Thread* self) { + if (self != _owner) { + return M_ILLEGAL_MONITOR_STATE; + } + simple_notify(self, true); + return M_OK; +} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/jvmtiRawMonitor.hpp --- a/src/hotspot/share/prims/jvmtiRawMonitor.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/jvmtiRawMonitor.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -25,7 +25,8 @@ #ifndef SHARE_PRIMS_JVMTIRAWMONITOR_HPP #define SHARE_PRIMS_JVMTIRAWMONITOR_HPP -#include "runtime/objectMonitor.hpp" +#include "memory/allocation.hpp" +#include "runtime/park.hpp" #include "utilities/growableArray.hpp" // @@ -33,32 +34,70 @@ // // Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.) // -// Wrapper for ObjectMonitor class that saves the Monitor's name +// A simplified version of the ObjectMonitor code. // -class JvmtiRawMonitor : public ObjectMonitor { -private: - int _magic; - char * _name; +class JvmtiRawMonitor : public CHeapObj { + + // Helper class to allow Threads to be linked into queues. + // This is a stripped down version of ObjectWaiter. + class QNode : public StackObj { + friend class JvmtiRawMonitor; + enum TStates { TS_READY, TS_RUN, TS_WAIT, TS_ENTER }; + QNode* volatile _next; + QNode* volatile _prev; + ParkEvent* _event; + volatile int _notified; + volatile TStates _t_state; + + QNode(Thread* thread); + }; + + Thread* volatile _owner; // pointer to owning thread + volatile int _recursions; // recursion count, 0 for first entry + QNode* volatile _entry_list; // Threads blocked on entry or reentry. + // The list is actually composed of nodes, + // acting as proxies for Threads. + QNode* volatile _wait_set; // Threads wait()ing on the monitor + volatile jint _waiters; // number of waiting threads + int _magic; + char* _name; // JVMTI_RM_MAGIC is set in contructor and unset in destructor. 
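The _recursions save/restore around simple_wait() above has an agent-visible consequence: a recursive owner still releases the monitor completely while waiting, and still owes the full number of exits afterwards. A sketch against the standard JVMTI API (hypothetical call site, error handling elided):

#include <jvmti.h>

void wait_while_recursively_locked(jvmtiEnv* jvmti, jrawMonitorID mon) {
  jvmti->RawMonitorEnter(mon);
  jvmti->RawMonitorEnter(mon);           // recursive enter: recursion count is 1
  jvmti->RawMonitorWait(mon, 10 /*ms*/); // releases fully; other threads may enter
  // On return the recursion count is restored, so both exits are still owed:
  jvmti->RawMonitorExit(mon);            // count 1 -> 0, still the owner
  jvmti->RawMonitorExit(mon);            // monitor released
}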
enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') }; - int SimpleEnter (Thread * Self) ; - int SimpleExit (Thread * Self) ; - int SimpleWait (Thread * Self, jlong millis) ; - int SimpleNotify (Thread * Self, bool All) ; + void simple_enter(Thread* self); + void simple_exit(Thread* self); + int simple_wait(Thread* self, jlong millis); + void simple_notify(Thread* self, bool all); + + public: -public: - JvmtiRawMonitor(const char *name); + // return codes + enum { + M_OK, // no error + M_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException + M_INTERRUPTED // Thread.interrupt() + }; + + // Non-aborting operator new + void* operator new(size_t size) throw() { + return CHeapObj::operator new(size, std::nothrow); + } + + JvmtiRawMonitor(const char* name); ~JvmtiRawMonitor(); - int raw_enter(TRAPS); - int raw_exit(TRAPS); - int raw_wait(jlong millis, bool interruptable, TRAPS); - int raw_notify(TRAPS); - int raw_notifyAll(TRAPS); - int magic() { return _magic; } - const char *get_name() { return _name; } - bool is_valid(); + + Thread* owner() const { return _owner; } + void set_owner(Thread* owner) { _owner = owner; } + int recursions() const { return _recursions; } + void raw_enter(Thread* self); + int raw_exit(Thread* self); + int raw_wait(jlong millis, bool interruptible, Thread* self); + int raw_notify(Thread* self); + int raw_notifyAll(Thread* self); + int magic() const { return _magic; } + const char* get_name() const { return _name; } + bool is_valid(); }; // Onload pending raw monitors @@ -67,8 +106,8 @@ // VM is fully initialized. class JvmtiPendingMonitors : public AllStatic { -private: - static GrowableArray *_monitors; // Cache raw monitor enter + private: + static GrowableArray* _monitors; // Cache raw monitor enter inline static GrowableArray* monitors() { return _monitors; } @@ -76,8 +115,8 @@ delete monitors(); } -public: - static void enter(JvmtiRawMonitor *monitor) { + public: + static void enter(JvmtiRawMonitor* monitor) { monitors()->append(monitor); } @@ -85,14 +124,14 @@ return monitors()->length(); } - static void destroy(JvmtiRawMonitor *monitor) { + static void destroy(JvmtiRawMonitor* monitor) { while (monitors()->contains(monitor)) { monitors()->remove(monitor); } } // Return false if monitor is not found in the list. 
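JvmtiPendingMonitors backs the onload corner case handled in jvmtiEnv.cpp earlier: an agent may enter a raw monitor before any JavaThread exists, in which case the enter is queued here and replayed by transition_raw_monitors() once the VM is up. A sketch of such an agent (standard JVMTI API, error handling elided):

#include <jvmti.h>

static jrawMonitorID g_lock;

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = NULL;
  if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_2) != JNI_OK) return JNI_ERR;
  jvmti->CreateRawMonitor("agent-lock", &g_lock);
  // Entering in the onload phase lands on the pending list; a matching exit
  // in the same phase simply removes the monitor from that list again.
  jvmti->RawMonitorEnter(g_lock);
  // ... initialize agent state ...
  jvmti->RawMonitorExit(g_lock);
  return JNI_OK;
}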
- static bool exit(JvmtiRawMonitor *monitor) { + static bool exit(JvmtiRawMonitor* monitor) { if (monitors()->contains(monitor)) { monitors()->remove(monitor); return true; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/methodHandles.cpp --- a/src/hotspot/share/prims/methodHandles.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/methodHandles.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -41,7 +41,6 @@ #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" #include "prims/methodHandles.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/prims/whitebox.cpp --- a/src/hotspot/share/prims/whitebox.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/prims/whitebox.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "classfile/stringTable.hpp" #include "classfile/symbolTable.hpp" #include "code/codeCache.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/methodMatcher.hpp" #include "compiler/directivesParser.hpp" #include "gc/shared/gcConfig.hpp" @@ -58,7 +59,6 @@ #include "prims/wbtestmethods/parserTests.hpp" #include "prims/whitebox.inline.hpp" #include "runtime/arguments.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/flags/jvmFlag.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/compilationPolicy.cpp --- a/src/hotspot/share/runtime/compilationPolicy.cpp Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,506 +0,0 @@ -/* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "classfile/classLoaderDataGraph.inline.hpp" -#include "code/compiledIC.hpp" -#include "code/nmethod.hpp" -#include "code/scopeDesc.hpp" -#include "interpreter/interpreter.hpp" -#include "memory/resourceArea.hpp" -#include "oops/methodData.hpp" -#include "oops/method.inline.hpp" -#include "oops/oop.inline.hpp" -#include "prims/nativeLookup.hpp" -#include "runtime/compilationPolicy.hpp" -#include "runtime/frame.hpp" -#include "runtime/handles.inline.hpp" -#include "runtime/stubRoutines.hpp" -#include "runtime/thread.hpp" -#include "runtime/tieredThresholdPolicy.hpp" -#include "runtime/vframe.hpp" -#include "runtime/vmOperations.hpp" -#include "utilities/events.hpp" -#include "utilities/globalDefinitions.hpp" - -#ifdef COMPILER1 -#include "c1/c1_Compiler.hpp" -#endif -#ifdef COMPILER2 -#include "opto/c2compiler.hpp" -#endif - -CompilationPolicy* CompilationPolicy::_policy; - -// Determine compilation policy based on command line argument -void compilationPolicy_init() { - #ifdef TIERED - if (TieredCompilation) { - CompilationPolicy::set_policy(new TieredThresholdPolicy()); - } else { - CompilationPolicy::set_policy(new SimpleCompPolicy()); - } - #else - CompilationPolicy::set_policy(new SimpleCompPolicy()); - #endif - - CompilationPolicy::policy()->initialize(); -} - -// Returns true if m must be compiled before executing it -// This is intended to force compiles for methods (usually for -// debugging) that would otherwise be interpreted for some reason. -bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) { - // Don't allow Xcomp to cause compiles in replay mode - if (ReplayCompiles) return false; - - if (m->has_compiled_code()) return false; // already compiled - if (!can_be_compiled(m, comp_level)) return false; - - return !UseInterpreter || // must compile all methods - (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods -} - -void CompilationPolicy::compile_if_required(const methodHandle& selected_method, TRAPS) { - if (must_be_compiled(selected_method)) { - // This path is unusual, mostly used by the '-Xcomp' stress test mode. - - // Note: with several active threads, the must_be_compiled may be true - // while can_be_compiled is false; remove assert - // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile"); - if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) { - // don't force compilation, resolve was on behalf of compiler - return; - } - if (selected_method->method_holder()->is_not_initialized()) { - // 'is_not_initialized' means not only '!is_initialized', but also that - // initialization has not been started yet ('!being_initialized') - // Do not force compilation of methods in uninitialized classes. - // Note that doing this would throw an assert later, - // in CompileBroker::compile_method. - // We sometimes use the link resolver to do reflective lookups - // even before classes are initialized. 
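The must_be_compiled() rule being deleted here (the policy code moves under compiler/, as the include changes elsewhere in this patch show) is short enough to restate standalone. This sketch drops the AlwaysCompileLoopMethods arm and the can_be_compiled() check for brevity:

// Inputs mirror ReplayCompiles, Method::has_compiled_code() and UseInterpreter.
bool must_be_compiled_sketch(bool replay_compiles, bool has_compiled_code,
                             bool use_interpreter) {
  if (replay_compiles) return false;    // never force compiles in replay mode
  if (has_compiled_code) return false;  // already compiled
  return !use_interpreter;              // -Xcomp: compile everything up front
}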
- return; - } - CompileBroker::compile_method(selected_method, InvocationEntryBci, - CompilationPolicy::policy()->initial_compile_level(), - methodHandle(), 0, CompileTask::Reason_MustBeCompiled, CHECK); - } -} - -// Returns true if m is allowed to be compiled -bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) { - // allow any levels for WhiteBox - assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level"); - - if (m->is_abstract()) return false; - if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false; - - // Math intrinsics should never be compiled as this can lead to - // monotonicity problems because the interpreter will prefer the - // compiled code to the intrinsic version. This can't happen in - // production because the invocation counter can't be incremented - // but we shouldn't expose the system to this problem in testing - // modes. - if (!AbstractInterpreter::can_be_compiled(m)) { - return false; - } - if (comp_level == CompLevel_all) { - if (TieredCompilation) { - // enough to be compilable at any level for tiered - return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization); - } else { - // must be compilable at available level for non-tiered - return !m->is_not_compilable(CompLevel_highest_tier); - } - } else if (is_compile(comp_level)) { - return !m->is_not_compilable(comp_level); - } - return false; -} - -// Returns true if m is allowed to be osr compiled -bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) { - bool result = false; - if (comp_level == CompLevel_all) { - if (TieredCompilation) { - // enough to be osr compilable at any level for tiered - result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization); - } else { - // must be osr compilable at available level for non-tiered - result = !m->is_not_osr_compilable(CompLevel_highest_tier); - } - } else if (is_compile(comp_level)) { - result = !m->is_not_osr_compilable(comp_level); - } - return (result && can_be_compiled(m, comp_level)); -} - -bool CompilationPolicy::is_compilation_enabled() { - // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler - return CompileBroker::should_compile_new_jobs(); -} - -CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) { - // Remove unloaded methods from the queue - for (CompileTask* task = compile_queue->first(); task != NULL; ) { - CompileTask* next = task->next(); - if (task->is_unloaded()) { - compile_queue->remove_and_mark_stale(task); - } - task = next; - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler && !BackgroundCompilation) { - /* - * In blocking compilation mode, the CompileBroker will make - * compilations submitted by a JVMCI compiler thread non-blocking. These - * compilations should be scheduled after all blocking compilations - * to service non-compiler related compilations sooner and reduce the - * chance of such compilations timing out. 
- */ - for (CompileTask* task = compile_queue->first(); task != NULL; task = task->next()) { - if (task->is_blocking()) { - return task; - } - } - } -#endif - return compile_queue->first(); -} - -#ifndef PRODUCT -void SimpleCompPolicy::trace_osr_completion(nmethod* osr_nm) { - if (TraceOnStackReplacement) { - if (osr_nm == NULL) tty->print_cr("compilation failed"); - else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm)); - } -} -#endif // !PRODUCT - -void SimpleCompPolicy::initialize() { - // Setup the compiler thread numbers - if (CICompilerCountPerCPU) { - // Example: if CICompilerCountPerCPU is true, then we get - // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine. - // May help big-app startup time. - _compiler_count = MAX2(log2_int(os::active_processor_count())-1,1); - // Make sure there is enough space in the code cache to hold all the compiler buffers - size_t buffer_size = 1; -#ifdef COMPILER1 - buffer_size = is_client_compilation_mode_vm() ? Compiler::code_buffer_size() : buffer_size; -#endif -#ifdef COMPILER2 - buffer_size = is_server_compilation_mode_vm() ? C2Compiler::initial_code_buffer_size() : buffer_size; -#endif - int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size; - if (_compiler_count > max_count) { - // Lower the compiler count such that all buffers fit into the code cache - _compiler_count = MAX2(max_count, 1); - } - FLAG_SET_ERGO(CICompilerCount, _compiler_count); - } else { - _compiler_count = CICompilerCount; - } -} - -// Note: this policy is used ONLY if TieredCompilation is off. -// compiler_count() behaves the following way: -// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return -// zero for the c1 compilation levels in server compilation mode runs -// and c2 compilation levels in client compilation mode runs. -// - with COMPILER2 not defined it should return zero for c2 compilation levels. -// - with COMPILER1 not defined it should return zero for c1 compilation levels. -// - if neither is defined - always return zero. -int SimpleCompPolicy::compiler_count(CompLevel comp_level) { - assert(!TieredCompilation, "This policy should not be used with TieredCompilation"); - if (COMPILER2_PRESENT(is_server_compilation_mode_vm() && is_c2_compile(comp_level) ||) - is_client_compilation_mode_vm() && is_c1_compile(comp_level)) { - return _compiler_count; - } - return 0; -} - -void SimpleCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) { - // Make sure invocation and backedge counter doesn't overflow again right away - // as would be the case for native methods. - - // BUT also make sure the method doesn't look like it was never executed. - // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). - MethodCounters* mcs = m->method_counters(); - assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); - mcs->invocation_counter()->set_carry(); - mcs->backedge_counter()->set_carry(); - - assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed"); -} - -void SimpleCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) { - // Delay next back-branch event but pump up invocation counter to trigger - // whole method compilation. 
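The CICompilerCountPerCPU sizing quoted above is easy to sanity-check. A standalone program, with a local floor-log2 as a stand-in for HotSpot's log2_int, reproduces the "max(log2(8)-1,1) = 2" example from the comment:

#include <algorithm>
#include <cstdio>

static int log2_floor(int x) { int r = 0; while (x >>= 1) r++; return r; }

int main() {
  int cpu_counts[] = {1, 2, 4, 8, 16, 64};
  for (int ncpus : cpu_counts) {
    int count = std::max(log2_floor(ncpus) - 1, 1);  // MAX2(log2_int(n)-1, 1)
    std::printf("%2d cpus -> %d compiler threads\n", ncpus, count);
  }
  // prints 1, 1, 1, 2, 3 and 5 compiler threads respectively
}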
- MethodCounters* mcs = m->method_counters(); - assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); - InvocationCounter* i = mcs->invocation_counter(); - InvocationCounter* b = mcs->backedge_counter(); - - // Don't set invocation_counter's value too low otherwise the method will - // look like immature (ic < ~5300) which prevents the inlining based on - // the type profiling. - i->set(i->state(), CompileThreshold); - // Don't reset counter too low - it is used to check if OSR method is ready. - b->set(b->state(), CompileThreshold / 2); -} - -// -// CounterDecay -// -// Iterates through invocation counters and decrements them. This -// is done at each safepoint. -// -class CounterDecay : public AllStatic { - static jlong _last_timestamp; - static void do_method(Method* m) { - MethodCounters* mcs = m->method_counters(); - if (mcs != NULL) { - mcs->invocation_counter()->decay(); - } - } -public: - static void decay(); - static bool is_decay_needed() { - return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; - } -}; - -jlong CounterDecay::_last_timestamp = 0; - -void CounterDecay::decay() { - _last_timestamp = os::javaTimeMillis(); - - // This operation is going to be performed only at the end of a safepoint - // and hence GC's will not be going on, all Java mutators are suspended - // at this point and hence SystemDictionary_lock is also not needed. - assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint"); - size_t nclasses = ClassLoaderDataGraph::num_instance_classes(); - size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 / - CounterHalfLifeTime); - for (size_t i = 0; i < classes_per_tick; i++) { - InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class(); - if (k != NULL) { - k->methods_do(do_method); - } - } -} - -// Called at the end of the safepoint -void SimpleCompPolicy::do_safepoint_work() { - if(UseCounterDecay && CounterDecay::is_decay_needed()) { - CounterDecay::decay(); - } -} - -void SimpleCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { - ScopeDesc* sd = trap_scope; - MethodCounters* mcs; - InvocationCounter* c; - for (; !sd->is_top(); sd = sd->sender()) { - mcs = sd->method()->method_counters(); - if (mcs != NULL) { - // Reset ICs of inlined methods, since they can trigger compilations also. - mcs->invocation_counter()->reset(); - } - } - mcs = sd->method()->method_counters(); - if (mcs != NULL) { - c = mcs->invocation_counter(); - if (is_osr) { - // It was an OSR method, so bump the count higher. - c->set(c->state(), CompileThreshold); - } else { - c->reset(); - } - mcs->backedge_counter()->reset(); - } -} - -// This method can be called by any component of the runtime to notify the policy -// that it's recommended to delay the compilation of this method. 
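Back-of-envelope numbers for CounterDecay::decay() above. CounterDecayMinIntervalLength and CounterHalfLifeTime are VM flags, so the 500 ms and 30 s used here are assumed defaults, and the class count is invented:

#include <cstdio>

int main() {
  double interval_ms = 500.0;  // CounterDecayMinIntervalLength (assumed)
  double half_life_s = 30.0;   // CounterHalfLifeTime (assumed)
  double nclasses  = 12000.0;  // sample number of loaded instance classes
  double per_tick  = nclasses * (interval_ms * 1e-3 / half_life_s);
  std::printf("%.0f classes per tick; full sweep every %.0f ticks (~%.0f s)\n",
              per_tick, nclasses / per_tick, half_life_s);  // 200; 60; ~30 s
}

With these values every class sees one decay step per half-life, which keeps the effective halving rate independent of how many classes are loaded.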
-void SimpleCompPolicy::delay_compilation(Method* method) { - MethodCounters* mcs = method->method_counters(); - if (mcs != NULL) { - mcs->invocation_counter()->decay(); - mcs->backedge_counter()->decay(); - } -} - -void SimpleCompPolicy::disable_compilation(Method* method) { - MethodCounters* mcs = method->method_counters(); - if (mcs != NULL) { - mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); - mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); - } -} - -CompileTask* SimpleCompPolicy::select_task(CompileQueue* compile_queue) { - return select_task_helper(compile_queue); -} - -bool SimpleCompPolicy::is_mature(Method* method) { - MethodData* mdo = method->method_data(); - assert(mdo != NULL, "Should be"); - uint current = mdo->mileage_of(method); - uint initial = mdo->creation_mileage(); - if (current < initial) - return true; // some sort of overflow - uint target; - if (ProfileMaturityPercentage <= 0) - target = (uint) -ProfileMaturityPercentage; // absolute value - else - target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 ); - return (current >= initial + target); -} - -nmethod* SimpleCompPolicy::event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, - int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) { - assert(comp_level == CompLevel_none, "This should be only called from the interpreter"); - NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci)); - if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) { - // If certain JVMTI events (e.g. frame pop event) are requested then the - // thread is forced to remain in interpreted code. This is - // implemented partly by a check in the run_compiled_code - // section of the interpreter whether we should skip running - // compiled code, and partly by skipping OSR compiles for - // interpreted-only threads. - if (bci != InvocationEntryBci) { - reset_counter_for_back_branch_event(method); - return NULL; - } - } - if (ReplayCompiles) { - // Don't trigger other compiles in testing mode - if (bci == InvocationEntryBci) { - reset_counter_for_invocation_event(method); - } else { - reset_counter_for_back_branch_event(method); - } - return NULL; - } - - if (bci == InvocationEntryBci) { - // when code cache is full, compilation gets switched off, UseCompiler - // is set to false - if (!method->has_compiled_code() && UseCompiler) { - method_invocation_event(method, thread); - } else { - // Force counter overflow on method entry, even if no compilation - // happened. (The method_invocation_event call does this also.) - reset_counter_for_invocation_event(method); - } - // compilation at an invocation overflow no longer goes and retries test for - // compiled method. We always run the loser of the race as interpreted. - // so return NULL - return NULL; - } else { - // counter overflow in a loop => try to do on-stack-replacement - nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true); - NOT_PRODUCT(trace_osr_request(method, osr_nm, bci)); - // when code cache is full, we should not compile any more... 
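A worked example for the is_mature() test above. ProfileMaturityPercentage and CompileThreshold are VM flags, so 20 and 10000 are assumed sample values; the negative branch mirrors the absolute-value reading in the deleted code:

#include <cstdio>

int main() {
  int profile_maturity_percentage = 20;  // assumed flag value
  int compile_threshold = 10000;         // assumed flag value
  unsigned target;
  if (profile_maturity_percentage <= 0)
    target = (unsigned)-profile_maturity_percentage;  // absolute event count
  else
    target = (unsigned)((profile_maturity_percentage * compile_threshold) / 100);
  std::printf("profile is mature %u events past its creation mileage\n", target);  // 2000
}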
- if (osr_nm == NULL && UseCompiler) { - method_back_branch_event(method, bci, thread); - osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true); - } - if (osr_nm == NULL) { - reset_counter_for_back_branch_event(method); - return NULL; - } - return osr_nm; - } - return NULL; -} - -#ifndef PRODUCT -void SimpleCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) { - if (TraceInvocationCounterOverflow) { - MethodCounters* mcs = m->method_counters(); - assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); - InvocationCounter* ic = mcs->invocation_counter(); - InvocationCounter* bc = mcs->backedge_counter(); - ResourceMark rm; - if (bci == InvocationEntryBci) { - tty->print("comp-policy cntr ovfl @ %d in entry of ", bci); - } else { - tty->print("comp-policy cntr ovfl @ %d in loop of ", bci); - } - m->print_value(); - tty->cr(); - ic->print(); - bc->print(); - if (ProfileInterpreter) { - if (bci != InvocationEntryBci) { - MethodData* mdo = m->method_data(); - if (mdo != NULL) { - ProfileData *pd = mdo->bci_to_data(branch_bci); - if (pd == NULL) { - tty->print_cr("back branch count = N/A (missing ProfileData)"); - } else { - tty->print_cr("back branch count = %d", pd->as_JumpData()->taken()); - } - } - } - } - } -} - -void SimpleCompPolicy::trace_osr_request(const methodHandle& method, nmethod* osr, int bci) { - if (TraceOnStackReplacement) { - ResourceMark rm; - tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for "); - method->print_short_name(tty); - tty->print_cr(" at bci %d", bci); - } -} -#endif // !PRODUCT - -void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) { - const int comp_level = CompLevel_highest_tier; - const int hot_count = m->invocation_count(); - reset_counter_for_invocation_event(m); - - if (is_compilation_enabled() && can_be_compiled(m, comp_level)) { - CompiledMethod* nm = m->code(); - if (nm == NULL ) { - CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, CompileTask::Reason_InvocationCount, thread); - } - } -} - -void SimpleCompPolicy::method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread) { - const int comp_level = CompLevel_highest_tier; - const int hot_count = m->backedge_count(); - - if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) { - CompileBroker::compile_method(m, bci, comp_level, m, hot_count, CompileTask::Reason_BackedgeCount, thread); - NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) - } -} diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/compilationPolicy.hpp --- a/src/hotspot/share/runtime/compilationPolicy.hpp Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_RUNTIME_COMPILATIONPOLICY_HPP -#define SHARE_RUNTIME_COMPILATIONPOLICY_HPP - -#include "code/nmethod.hpp" -#include "compiler/compileBroker.hpp" -#include "memory/allocation.hpp" -#include "runtime/vmOperations.hpp" -#include "utilities/growableArray.hpp" - -// The CompilationPolicy selects which method (if any) should be compiled. -// It also decides which methods must always be compiled (i.e., are never -// interpreted). -class CompileTask; -class CompileQueue; - -class CompilationPolicy : public CHeapObj { - static CompilationPolicy* _policy; - - // m must be compiled before executing it - static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_all); - -public: - // If m must_be_compiled then request a compilation from the CompileBroker. - // This supports the -Xcomp option. - static void compile_if_required(const methodHandle& m, TRAPS); - - // m is allowed to be compiled - static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_all); - // m is allowed to be osr compiled - static bool can_be_osr_compiled(const methodHandle& m, int comp_level = CompLevel_all); - static bool is_compilation_enabled(); - static void set_policy(CompilationPolicy* policy) { _policy = policy; } - static CompilationPolicy* policy() { return _policy; } - - static CompileTask* select_task_helper(CompileQueue* compile_queue); - - // Return initial compile level that is used with Xcomp - virtual CompLevel initial_compile_level() = 0; - virtual int compiler_count(CompLevel comp_level) = 0; - // main notification entry, return a pointer to an nmethod if the OSR is required, - // returns NULL otherwise. - virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) = 0; - // safepoint() is called at the end of the safepoint - virtual void do_safepoint_work() = 0; - // reprofile request - virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0; - // delay_compilation(method) can be called by any component of the runtime to notify the policy - // that it's recommended to delay the compilation of this method. - virtual void delay_compilation(Method* method) = 0; - // disable_compilation() is called whenever the runtime decides to disable compilation of the - // specified method. - virtual void disable_compilation(Method* method) = 0; - // Select task is called by CompileBroker. The queue is guaranteed to have at least one - // element and is locked. The function should select one and return it. - virtual CompileTask* select_task(CompileQueue* compile_queue) = 0; - // Tell the runtime if we think a given method is adequately profiled. - virtual bool is_mature(Method* method) = 0; - // Do policy initialization - virtual void initialize() = 0; - virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; } -}; - -// A simple compilation policy. 
-class SimpleCompPolicy : public CompilationPolicy { - int _compiler_count; - private: - static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci); - static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci); - static void trace_osr_completion(nmethod* osr_nm); - void reset_counter_for_invocation_event(const methodHandle& method); - void reset_counter_for_back_branch_event(const methodHandle& method); - void method_invocation_event(const methodHandle& m, JavaThread* thread); - void method_back_branch_event(const methodHandle& m, int bci, JavaThread* thread); - public: - SimpleCompPolicy() : _compiler_count(0) { } - virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; } - virtual int compiler_count(CompLevel comp_level); - virtual void do_safepoint_work(); - virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); - virtual void delay_compilation(Method* method); - virtual void disable_compilation(Method* method); - virtual bool is_mature(Method* method); - virtual void initialize(); - virtual CompileTask* select_task(CompileQueue* compile_queue); - virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread); -}; - - -#endif // SHARE_RUNTIME_COMPILATIONPOLICY_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/deoptimization.cpp --- a/src/hotspot/share/runtime/deoptimization.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/deoptimization.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -31,6 +31,7 @@ #include "code/nmethod.hpp" #include "code/pcDesc.hpp" #include "code/scopeDesc.hpp" +#include "compiler/compilationPolicy.hpp" #include "interpreter/bytecode.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/oopMapCache.hpp" @@ -48,7 +49,6 @@ #include "oops/verifyOopClosure.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/fieldDescriptor.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/globals.hpp --- a/src/hotspot/share/runtime/globals.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/globals.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -1047,6 +1047,9 @@ diagnostic(bool, EnableThreadSMRStatistics, trueInDebug, \ "Enable Thread SMR Statistics") \ \ + product(bool, UseNotificationThread, true, \ + "Use Notification Thread") \ + \ product(bool, Inline, true, \ "Enable inlining") \ \ @@ -1194,9 +1197,6 @@ develop(bool, TraceCreateZombies, false, \ "trace creation of zombie nmethods") \ \ - notproduct(bool, IgnoreLockingAssertions, false, \ - "disable locking assertions (for speed)") \ - \ product(bool, RangeCheckElimination, true, \ "Eliminate range checks") \ \ diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/interfaceSupport.cpp --- a/src/hotspot/share/runtime/interfaceSupport.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/interfaceSupport.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -55,12 +55,6 @@ if (WalkStackALot) { InterfaceSupport::walk_stack(); } -#ifdef COMPILER2 - // This option is not used by Compiler 1 - if (StressDerivedPointers) { - InterfaceSupport::stress_derived_pointers(); - } -#endif if (DeoptimizeALot || DeoptimizeRandom) { InterfaceSupport::deoptimizeAll(); } @@ -234,31 +228,6 @@ } -void 
InterfaceSupport::stress_derived_pointers() { -#ifdef COMPILER2 - JavaThread *thread = JavaThread::current(); - if (!is_init_completed()) return; - ResourceMark rm(thread); - bool found = false; - for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) { - CodeBlob* cb = sfs.current()->cb(); - if (cb != NULL && cb->oop_maps() ) { - // Find oopmap for current method - const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc()); - assert(map != NULL, "no oopmap found for pc"); - found = map->has_derived_pointer(); - } - } - if (found) { - // $$$ Not sure what to do here. - /* - Scavenge::invoke(0); - */ - } -#endif -} - - void InterfaceSupport::verify_stack() { JavaThread* thread = JavaThread::current(); ResourceMark rm(thread); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/interfaceSupport.inline.hpp --- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -62,7 +62,6 @@ static void zombieAll(); static void deoptimizeAll(); - static void stress_derived_pointers(); static void verify_stack(); static void verify_last_frame(); # endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/java.cpp --- a/src/hotspot/share/runtime/java.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/java.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -56,7 +56,6 @@ #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/flags/flagSetting.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/javaCalls.cpp --- a/src/hotspot/share/runtime/javaCalls.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/javaCalls.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -26,6 +26,7 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "code/nmethod.hpp" +#include "compiler/compilationPolicy.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/linkResolver.hpp" @@ -33,7 +34,6 @@ #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/jniCheck.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/mutexLocker.cpp --- a/src/hotspot/share/runtime/mutexLocker.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/mutexLocker.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -66,7 +66,6 @@ Mutex* RetData_lock = NULL; Monitor* VMOperationQueue_lock = NULL; Monitor* VMOperationRequest_lock = NULL; -Monitor* SerializePage_lock = NULL; Monitor* Threads_lock = NULL; Mutex* NonJavaThreadsList_lock = NULL; Mutex* NonJavaThreadsListSync_lock = NULL; @@ -116,8 +115,10 @@ Mutex* Management_lock = NULL; Monitor* Service_lock = NULL; +Monitor* Notification_lock = NULL; Monitor* PeriodicTask_lock = NULL; Monitor* RedefineClasses_lock = NULL; +Mutex* Verify_lock = NULL; #if INCLUDE_JFR Mutex* JfrStacktrace_lock = NULL; @@ -160,7 +161,6 @@ #ifdef ASSERT void assert_locked_or_safepoint(const Mutex* lock) { // check if this thread owns the lock (common case) - if (IgnoreLockingAssertions) return; assert(lock != NULL, "Need non-NULL lock"); if (lock->owned_by_self()) return; if 
(SafepointSynchronize::is_at_safepoint()) return; @@ -173,7 +173,6 @@ // a weaker assertion than the above void assert_locked_or_safepoint_weak(const Mutex* lock) { - if (IgnoreLockingAssertions) return; assert(lock != NULL, "Need non-NULL lock"); if (lock->is_locked()) return; if (SafepointSynchronize::is_at_safepoint()) return; @@ -183,7 +182,6 @@ // a stronger assertion than the above void assert_lock_strong(const Mutex* lock) { - if (IgnoreLockingAssertions) return; assert(lock != NULL, "Need non-NULL lock"); if (lock->owned_by_self()) return; fatal("must own lock %s", lock->name()); @@ -236,6 +234,13 @@ def(Patching_lock , PaddedMutex , special, true, _safepoint_check_never); // used for safepointing and code patching. def(CompiledMethod_lock , PaddedMutex , special-1, true, _safepoint_check_never); def(Service_lock , PaddedMonitor, special, true, _safepoint_check_never); // used for service thread operations + + if (UseNotificationThread) { + def(Notification_lock , PaddedMonitor, special, true, _safepoint_check_never); // used for notification thread operations + } else { + Notification_lock = Service_lock; + } + def(JmethodIdCreation_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for creating jmethodIDs. def(SystemDictionary_lock , PaddedMonitor, leaf, true, _safepoint_check_always); @@ -298,6 +303,7 @@ def(CompileThread_lock , PaddedMonitor, nonleaf+5, false, _safepoint_check_always); def(PeriodicTask_lock , PaddedMonitor, nonleaf+5, true, _safepoint_check_always); def(RedefineClasses_lock , PaddedMonitor, nonleaf+5, true, _safepoint_check_always); + def(Verify_lock , PaddedMutex, nonleaf+5, true, _safepoint_check_always); if (WhiteBoxAPI) { def(Compilation_lock , PaddedMonitor, leaf, false, _safepoint_check_never); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/mutexLocker.hpp --- a/src/hotspot/share/runtime/mutexLocker.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/mutexLocker.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -103,7 +103,6 @@ extern Mutex* RawMonitor_lock; extern Mutex* PerfDataMemAlloc_lock; // a lock on the allocator for PerfData memory for performance data extern Mutex* PerfDataManager_lock; // a lock on access to PerfDataManager resources -extern Mutex* ParkerFreeList_lock; extern Mutex* OopMapCacheAlloc_lock; // protects allocation of oop_map caches extern Mutex* FreeList_lock; // protects the free region list during safepoints @@ -112,8 +111,10 @@ extern Mutex* Management_lock; // a lock used to serialize JVM management extern Monitor* Service_lock; // a lock used for service thread operation +extern Monitor* Notification_lock; // a lock used for notification thread operation extern Monitor* PeriodicTask_lock; // protects the periodic task structure extern Monitor* RedefineClasses_lock; // locks classes from parallel redefinition +extern Mutex* Verify_lock; // synchronizes initialization of the verify library extern Monitor* ThreadsSMRDelete_lock; // Used by ThreadsSMRSupport to take pressure off the Threads_lock extern Mutex* ThreadIdTableCreate_lock; // Used by ThreadIdTable to lazily create the thread id table extern Mutex* SharedDecoder_lock; // serializes access to the decoder during normal (not error reporting) use diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/notificationThread.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/notificationThread.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates.
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "memory/universe.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/notificationThread.hpp" +#include "services/diagnosticArgument.hpp" +#include "services/diagnosticFramework.hpp" +#include "services/gcNotifier.hpp" +#include "services/lowMemoryDetector.hpp" + +NotificationThread* NotificationThread::_instance = NULL; + +void NotificationThread::initialize() { + EXCEPTION_MARK; + + const char* name = "Notification Thread"; + Handle string = java_lang_String::create_from_str(name, CHECK); + + // Initialize thread_oop to put it into the system threadGroup + Handle thread_group (THREAD, Universe::system_thread_group()); + Handle thread_oop = JavaCalls::construct_new_instance( + SystemDictionary::Thread_klass(), + vmSymbols::threadgroup_string_void_signature(), + thread_group, + string, + CHECK); + + Klass* group = SystemDictionary::ThreadGroup_klass(); + JavaValue result(T_VOID); + JavaCalls::call_special(&result, + thread_group, + group, + vmSymbols::add_method_name(), + vmSymbols::thread_void_signature(), + thread_oop, + THREAD); + { + MutexLocker mu(Threads_lock); + NotificationThread* thread = new NotificationThread(¬ification_thread_entry); + + // At this point it may be possible that no osthread was created for the + // JavaThread due to lack of memory. We would have to throw an exception + // in that case. However, since this must work and we do not allow + // exceptions anyway, check and abort if this fails. + if (thread == NULL || thread->osthread() == NULL) { + vm_exit_during_initialization("java.lang.OutOfMemoryError", + os::native_thread_creation_failed_msg()); + } + + java_lang_Thread::set_thread(thread_oop(), thread); + java_lang_Thread::set_priority(thread_oop(), NearMaxPriority); + java_lang_Thread::set_daemon(thread_oop()); + thread->set_threadObj(thread_oop()); + _instance = thread; + + Threads::add(thread); + Thread::start(thread); + } +} + + + +void NotificationThread::notification_thread_entry(JavaThread* jt, TRAPS) { + while (true) { + bool sensors_changed = false; + bool has_dcmd_notification_event = false; + bool has_gc_notification_event = false; + { + // Need state transition ThreadBlockInVM so that this thread + // will be handled by safepoint correctly when this thread is + // notified at a safepoint. 
+ + ThreadBlockInVM tbivm(jt); + + MonitorLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); + // Process all available work on each (outer) iteration, rather than + // only the first recognized bit of work, to avoid frequently true early + // tests from potentially starving later work. Hence the use of + // arithmetic-or to combine results; we don't want short-circuiting. + while (((sensors_changed = LowMemoryDetector::has_pending_requests()) | + (has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) | + (has_gc_notification_event = GCNotifier::has_event())) + == 0) { + // Wait as a suspend equivalent until notified that there is some work to do. + ml.wait(0, true); + } + + } + + if (sensors_changed) { + LowMemoryDetector::process_sensor_changes(jt); + } + + if (has_gc_notification_event) { + GCNotifier::sendNotification(CHECK); + } + + if (has_dcmd_notification_event) { + DCmdFactory::send_notification(CHECK); + } + + } +} + diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/notificationThread.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/notificationThread.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_RUNTIME_NOTIFICATIONTHREAD_HPP +#define SHARE_RUNTIME_NOTIFICATIONTHREAD_HPP + +#include "runtime/thread.hpp" + +// A JavaThread for low memory detection support, GC and +// diagnostic framework notifications. This thread is not hidden +// from the external view to allow the debugger to stop at the +// breakpoints inside registered MXBean notification listeners.
+ +class NotificationThread : public JavaThread { + friend class VMStructs; + private: + + static NotificationThread* _instance; + + static void notification_thread_entry(JavaThread* thread, TRAPS); + NotificationThread(ThreadFunction entry_point) : JavaThread(entry_point) {}; + + public: + static void initialize(); + +}; + +#endif // SHARE_RUNTIME_NOTIFICATIONTHREAD_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/objectMonitor.hpp --- a/src/hotspot/share/runtime/objectMonitor.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/objectMonitor.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -43,7 +43,6 @@ class ObjectWaiter : public StackObj { public: enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ }; - enum Sorted { PREPEND, APPEND, SORTED }; ObjectWaiter* volatile _next; ObjectWaiter* volatile _prev; Thread* _thread; @@ -51,7 +50,6 @@ ParkEvent * _event; volatile int _notified; volatile TStates TState; - Sorted _Sorted; // List placement disposition bool _active; // Contention monitoring is enabled public: ObjectWaiter(Thread* thread); @@ -68,10 +66,6 @@ // WARNING: This is a very sensitive and fragile class. DO NOT make any // changes unless you are fully aware of the underlying semantics. // -// Class JvmtiRawMonitor currently inherits from ObjectMonitor so -// changes in this class must be careful to not break JvmtiRawMonitor. -// These two subsystems should be separated. -// // ObjectMonitor Layout Overview/Highlights/Restrictions: // // - The _header field must be at offset 0 because the displaced header @@ -127,16 +121,6 @@ // in a 64-bit JVM. class ObjectMonitor { - public: - enum { - OM_OK, // no error - OM_SYSTEM_ERROR, // operating system error - OM_ILLEGAL_MONITOR_STATE, // IllegalMonitorStateException - OM_INTERRUPTED, // Thread.interrupt() - OM_TIMED_OUT // Object.wait() timed out - }; - - private: friend class ObjectSynchronizer; friend class ObjectWaiter; friend class VMStructs; @@ -158,16 +142,13 @@ DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile markWord) + sizeof(void* volatile) + sizeof(ObjectMonitor *)); - protected: // protected for JvmtiRawMonitor void* volatile _owner; // pointer to owning thread OR BasicLock - private: volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor - protected: // protected for JvmtiRawMonitor volatile intptr_t _recursions; // recursion count, 0 for first entry ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry. // The list is actually composed of WaitNodes, // acting as proxies for Threads. - private: + ObjectWaiter* volatile _cxq; // LL of recently-arrived threads blocked on entry. Thread* volatile _succ; // Heir presumptive thread - used for futile wakeup throttling Thread* volatile _Responsible; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/os.cpp --- a/src/hotspot/share/runtime/os.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/os.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -536,14 +536,6 @@ char buffer[JVM_MAXPATHLEN]; char ebuf[1024]; - // Try to load verify dll first. In 1.3 java dll depends on it and is not - // always able to find it when the loading executable is outside the JDK. - // In order to keep working with 1.2 we ignore any loading errors. 
- if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), - "verify")) { - dll_load(buffer, ebuf, sizeof(ebuf)); - } - // Load java dll if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), "java")) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/safepoint.cpp --- a/src/hotspot/share/runtime/safepoint.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/safepoint.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -33,6 +33,7 @@ #include "code/nmethod.hpp" #include "code/pcDesc.hpp" #include "code/scopeDesc.hpp" +#include "compiler/compilationPolicy.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/gcLocker.hpp" #include "gc/shared/oopStorage.hpp" @@ -47,7 +48,6 @@ #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "runtime/atomic.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/serviceThread.cpp --- a/src/hotspot/share/runtime/serviceThread.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/serviceThread.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -43,6 +43,7 @@ #include "services/diagnosticFramework.hpp" #include "services/gcNotifier.hpp" #include "services/lowMemoryDetector.hpp" +#include "services/threadIdTable.hpp" ServiceThread* ServiceThread::_instance = NULL; @@ -101,6 +102,7 @@ bool stringtable_work = false; bool symboltable_work = false; bool resolved_method_table_work = false; + bool thread_id_table_work = false; bool protection_domain_table_work = false; bool oopstorage_work = false; JvmtiDeferredEvent jvmti_event; @@ -120,13 +122,14 @@ // only the first recognized bit of work, to avoid frequently true early // tests from potentially starving later work. Hence the use of // arithmetic-or to combine results; we don't want short-circuiting. 
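The comment above captures the pattern both worker loops in this patch rely on: combining the probe assignments with bitwise | forces every probe to run and latch its flag on each pass, where || would stop at the first pending item and leave the later flags stale. A minimal self-contained sketch of the idea (the probe functions and flags are hypothetical stand-ins, not APIs from this change):

  #include <atomic>
  #include <thread>

  static std::atomic<bool> g_queue_flag(false);
  static std::atomic<bool> g_table_flag(false);

  // Each probe reports and clears its pending flag.
  static bool queue_has_work() { return g_queue_flag.exchange(false); }
  static bool table_has_work() { return g_table_flag.exchange(false); }

  static void gather_work(bool& do_queue, bool& do_table) {
    // Bitwise OR evaluates both operands, so both flags are refreshed on
    // every iteration; '||' would skip table_has_work() whenever the queue
    // already had work, potentially starving the table.
    while (((do_queue = queue_has_work()) |
            (do_table = table_has_work())) == 0) {
      std::this_thread::yield(); // the real threads block on a Monitor here
    }
  }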
- while (((sensors_changed = LowMemoryDetector::has_pending_requests()) | + while (((sensors_changed = (!UseNotificationThread && LowMemoryDetector::has_pending_requests())) | (has_jvmti_events = JvmtiDeferredEventQueue::has_events()) | - (has_gc_notification_event = GCNotifier::has_event()) | - (has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) | + (has_gc_notification_event = (!UseNotificationThread && GCNotifier::has_event())) | + (has_dcmd_notification_event = (!UseNotificationThread && DCmdFactory::has_pending_jmx_notification())) | (stringtable_work = StringTable::has_work()) | (symboltable_work = SymbolTable::has_work()) | (resolved_method_table_work = ResolvedMethodTable::has_work()) | + (thread_id_table_work = ThreadIdTable::has_work()) | (protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) | (oopstorage_work = OopStorage::has_cleanup_work_and_reset()) ) == 0) { @@ -151,22 +154,28 @@ jvmti_event.post(); } - if (sensors_changed) { - LowMemoryDetector::process_sensor_changes(jt); - } + if (!UseNotificationThread) { + if (sensors_changed) { + LowMemoryDetector::process_sensor_changes(jt); + } - if(has_gc_notification_event) { - GCNotifier::sendNotification(CHECK); - } + if (has_gc_notification_event) { + GCNotifier::sendNotification(CHECK); + } - if(has_dcmd_notification_event) { - DCmdFactory::send_notification(CHECK); + if (has_dcmd_notification_event) { + DCmdFactory::send_notification(CHECK); + } } if (resolved_method_table_work) { ResolvedMethodTable::do_concurrent_work(jt); } + if (thread_id_table_work) { + ThreadIdTable::do_concurrent_work(jt); + } + if (protection_domain_table_work) { SystemDictionary::pd_cache_table()->unlink(); } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/serviceThread.hpp --- a/src/hotspot/share/runtime/serviceThread.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/serviceThread.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -27,8 +27,10 @@ #include "runtime/thread.hpp" -// A JavaThread for low memory detection support and JVMTI -// compiled-method-load events. +// A JavaThread, hidden from the external view, for JVMTI compiled-method-load +// events, oop storage cleanup, and the maintenance of string, symbol, +// protection domain, and resolved method tables.
+ class ServiceThread : public JavaThread { friend class VMStructs; private: diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/sharedRuntime.cpp --- a/src/hotspot/share/runtime/sharedRuntime.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/sharedRuntime.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -57,7 +57,6 @@ #include "runtime/arguments.hpp" #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/sweeper.cpp --- a/src/hotspot/share/runtime/sweeper.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/sweeper.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -38,7 +38,6 @@ #include "memory/universe.hpp" #include "oops/method.hpp" #include "runtime/atomic.hpp" -#include "runtime/compilationPolicy.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/handshake.hpp" #include "runtime/mutexLocker.hpp" diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/thread.cpp --- a/src/hotspot/share/runtime/thread.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/thread.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -258,6 +258,7 @@ _current_pending_monitor = NULL; _current_pending_monitor_is_from_java = true; _current_waiting_monitor = NULL; + _current_pending_raw_monitor = NULL; _num_nested_signal = 0; om_free_list = NULL; om_free_count = 0; @@ -3847,7 +3848,7 @@ // Create the VMThread { TraceTime timer("Start VMThread", TRACETIME_LOG(Info, startuptime)); - VMThread::create(); + VMThread::create(); Thread* vmthread = VMThread::vm_thread(); if (!os::create_thread(vmthread, os::vm_thread)) { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/thread.hpp --- a/src/hotspot/share/runtime/thread.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/thread.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -62,6 +62,7 @@ class ThreadsList; class ThreadsSMRSupport; +class JvmtiRawMonitor; class JvmtiThreadState; class ThreadStatistics; class ConcurrentLocksDump; @@ -404,6 +405,9 @@ ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread // is waiting to lock bool _current_pending_monitor_is_from_java; // locking is from Java code + JvmtiRawMonitor* _current_pending_raw_monitor; // JvmtiRawMonitor this thread + // is waiting to lock + // ObjectMonitor on which this thread called Object.wait() ObjectMonitor* _current_waiting_monitor; @@ -640,6 +644,14 @@ _current_waiting_monitor = monitor; } + // For tracking the Jvmti raw monitor the thread is pending on. + JvmtiRawMonitor* current_pending_raw_monitor() { + return _current_pending_raw_monitor; + } + void set_current_pending_raw_monitor(JvmtiRawMonitor* monitor) { + _current_pending_raw_monitor = monitor; + } + // GC support // Apply "f->do_oop" to all root oops in "this". // Used by JavaThread::oops_do. 
@@ -786,7 +798,7 @@ public: volatile intptr_t _Stalled; volatile int _TypeTag; - ParkEvent * _ParkEvent; // for synchronized() + ParkEvent * _ParkEvent; // for Object monitors and JVMTI raw monitors ParkEvent * _MuxEvent; // for low-level muxAcquire-muxRelease int NativeSyncRecursion; // diagnostic diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/threadSMR.cpp --- a/src/hotspot/share/runtime/threadSMR.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/threadSMR.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -528,6 +528,22 @@ return; } + if ( _thread == VM_Exit::shutdown_thread()) { + // The shutdown thread has removed itself from the Threads + // list and is safe to have a waiver from this check because + // VM_Exit::_shutdown_thread is not set until after the VMThread + // has started the final safepoint which holds the Threads_lock + // for the remainder of the VM's life. + return; + } + + if (VMError::is_error_reported() && + VMError::get_first_error_tid() == os::current_thread_id()) { + // If there is an error reported by this thread it may use ThreadsList even + // if it's unsafe. + return; + } + // The closure will attempt to verify that the calling thread can // be found by threads_do() on the specified ThreadsList. If it // is successful, then the specified ThreadsList was acquired as @@ -540,12 +556,6 @@ // ThreadsList is not a stable hazard ptr and can be freed by // another thread from the to-be-deleted list at any time. // - // Note: The shutdown thread has removed itself from the Threads - // list and is safe to have a waiver from this check because - // VM_Exit::_shutdown_thread is not set until after the VMThread - // has started the final safepoint which holds the Threads_lock - // for the remainder of the VM's life. - // VerifyHazardPtrThreadClosure cl(_thread); ThreadsSMRSupport::threads_do(&cl, _list); @@ -555,7 +565,7 @@ // In either case, we won't get past this point with a badly placed // ThreadsListHandle. - assert(cl.found() || _thread == VM_Exit::shutdown_thread(), "Acquired a ThreadsList snapshot from a thread not recognized by the Thread-SMR protocol."); + assert(cl.found(), "Acquired a ThreadsList snapshot from a thread not recognized by the Thread-SMR protocol."); #endif } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/tieredThresholdPolicy.cpp --- a/src/hotspot/share/runtime/tieredThresholdPolicy.cpp Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1005 +0,0 @@ -/* - * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "compiler/compileBroker.hpp" -#include "compiler/compilerOracle.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/arguments.hpp" -#include "runtime/handles.inline.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/safepointVerifiers.hpp" -#include "runtime/tieredThresholdPolicy.hpp" -#include "code/scopeDesc.hpp" -#include "oops/method.inline.hpp" -#if INCLUDE_JVMCI -#include "jvmci/jvmci.hpp" -#endif - -#ifdef TIERED - -#include "c1/c1_Compiler.hpp" -#include "opto/c2compiler.hpp" - -template<CompLevel level> -bool TieredThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) { - double threshold_scaling; - if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) { - scale *= threshold_scaling; - } - switch(level) { - case CompLevel_aot: - return (i >= Tier3AOTInvocationThreshold * scale) || - (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale); - case CompLevel_none: - case CompLevel_limited_profile: - return (i >= Tier3InvocationThreshold * scale) || - (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale); - case CompLevel_full_profile: - return (i >= Tier4InvocationThreshold * scale) || - (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale); - } - return true; -} - -template<CompLevel level> -bool TieredThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) { - double threshold_scaling; - if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) { - scale *= threshold_scaling; - } - switch(level) { - case CompLevel_aot: - return b >= Tier3AOTBackEdgeThreshold * scale; - case CompLevel_none: - case CompLevel_limited_profile: - return b >= Tier3BackEdgeThreshold * scale; - case CompLevel_full_profile: - return b >= Tier4BackEdgeThreshold * scale; - } - return true; -} - -// Simple methods compile as well with C1 as with C2. -// Determine if a given method is such a case.
-bool TieredThresholdPolicy::is_trivial(Method* method) { - if (method->is_accessor() || - method->is_constant_getter()) { - return true; - } - return false; -} - -bool TieredThresholdPolicy::should_compile_at_level_simple(Method* method) { - if (TieredThresholdPolicy::is_trivial(method)) { - return true; - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler) { - AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization); - if (comp != NULL && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) { - return true; - } - } -#endif - return false; -} - -CompLevel TieredThresholdPolicy::comp_level(Method* method) { - CompiledMethod *nm = method->code(); - if (nm != NULL && nm->is_in_use()) { - return (CompLevel)nm->comp_level(); - } - return CompLevel_none; -} - -void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) { - int invocation_count = mh->invocation_count(); - int backedge_count = mh->backedge_count(); - MethodData* mdh = mh->method_data(); - int mdo_invocations = 0, mdo_backedges = 0; - int mdo_invocations_start = 0, mdo_backedges_start = 0; - if (mdh != NULL) { - mdo_invocations = mdh->invocation_count(); - mdo_backedges = mdh->backedge_count(); - mdo_invocations_start = mdh->invocation_count_start(); - mdo_backedges_start = mdh->backedge_count_start(); - } - tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix, - invocation_count, backedge_count, prefix, - mdo_invocations, mdo_invocations_start, - mdo_backedges, mdo_backedges_start); - tty->print(" %smax levels=%d,%d", prefix, - mh->highest_comp_level(), mh->highest_osr_comp_level()); -} - -// Print an event. -void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level) { - bool inlinee_event = mh() != imh(); - - ttyLocker tty_lock; - tty->print("%lf: [", os::elapsedTime()); - - switch(type) { - case CALL: - tty->print("call"); - break; - case LOOP: - tty->print("loop"); - break; - case COMPILE: - tty->print("compile"); - break; - case REMOVE_FROM_QUEUE: - tty->print("remove-from-queue"); - break; - case UPDATE_IN_QUEUE: - tty->print("update-in-queue"); - break; - case REPROFILE: - tty->print("reprofile"); - break; - case MAKE_NOT_ENTRANT: - tty->print("make-not-entrant"); - break; - default: - tty->print("unknown"); - } - - tty->print(" level=%d ", level); - - ResourceMark rm; - char *method_name = mh->name_and_sig_as_C_string(); - tty->print("[%s", method_name); - if (inlinee_event) { - char *inlinee_name = imh->name_and_sig_as_C_string(); - tty->print(" [%s]] ", inlinee_name); - } - else tty->print("] "); - tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile), - CompileBroker::queue_size(CompLevel_full_optimization)); - - print_specific(type, mh, imh, bci, level); - - if (type != COMPILE) { - print_counters("", mh); - if (inlinee_event) { - print_counters("inlinee ", imh); - } - tty->print(" compilable="); - bool need_comma = false; - if (!mh->is_not_compilable(CompLevel_full_profile)) { - tty->print("c1"); - need_comma = true; - } - if (!mh->is_not_osr_compilable(CompLevel_full_profile)) { - if (need_comma) tty->print(","); - tty->print("c1-osr"); - need_comma = true; - } - if (!mh->is_not_compilable(CompLevel_full_optimization)) { - if (need_comma) tty->print(","); - tty->print("c2"); - need_comma = true; - } - if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) { - if (need_comma) tty->print(","); - tty->print("c2-osr"); - } - tty->print(" 
status="); - if (mh->queued_for_compilation()) { - tty->print("in-queue"); - } else tty->print("idle"); - } - tty->print_cr("]"); -} - -void TieredThresholdPolicy::initialize() { - int count = CICompilerCount; - bool c1_only = TieredStopAtLevel < CompLevel_full_optimization; -#ifdef _LP64 - // Turn on ergonomic compiler count selection - if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) { - FLAG_SET_DEFAULT(CICompilerCountPerCPU, true); - } - if (CICompilerCountPerCPU) { - // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n - int log_cpu = log2_int(os::active_processor_count()); - int loglog_cpu = log2_int(MAX2(log_cpu, 1)); - count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2); - // Make sure there is enough space in the code cache to hold all the compiler buffers - size_t c1_size = Compiler::code_buffer_size(); - size_t c2_size = C2Compiler::initial_code_buffer_size(); - size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3); - int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size; - if (count > max_count) { - // Lower the compiler count such that all buffers fit into the code cache - count = MAX2(max_count, c1_only ? 1 : 2); - } - FLAG_SET_ERGO(CICompilerCount, count); - } -#else - // On 32-bit systems, the number of compiler threads is limited to 3. - // On these systems, the virtual address space available to the JVM - // is usually limited to 2-4 GB (the exact value depends on the platform). - // As the compilers (especially C2) can consume a large amount of - // memory, scaling the number of compiler threads with the number of - // available cores can result in the exhaustion of the address space - /// available to the VM and thus cause the VM to crash. 
- if (FLAG_IS_DEFAULT(CICompilerCount)) { - count = 3; - FLAG_SET_ERGO(CICompilerCount, count); - } -#endif - - if (c1_only) { - // No C2 compiler thread required - set_c1_count(count); - } else { - set_c1_count(MAX2(count / 3, 1)); - set_c2_count(MAX2(count - c1_count(), 1)); - } - assert(count == c1_count() + c2_count(), "inconsistent compiler thread count"); - - // Some inlining tuning -#ifdef X86 - if (FLAG_IS_DEFAULT(InlineSmallCode)) { - FLAG_SET_DEFAULT(InlineSmallCode, 2000); - } -#endif - -#if defined SPARC || defined AARCH64 - if (FLAG_IS_DEFAULT(InlineSmallCode)) { - FLAG_SET_DEFAULT(InlineSmallCode, 2500); - } -#endif - - set_increase_threshold_at_ratio(); - set_start_time(os::javaTimeMillis()); -} - -void TieredThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) { - if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) { - counter->set_carry_flag(); - } -} - -// Set carry flags on the counters if necessary -void TieredThresholdPolicy::handle_counter_overflow(Method* method) { - MethodCounters *mcs = method->method_counters(); - if (mcs != NULL) { - set_carry_if_necessary(mcs->invocation_counter()); - set_carry_if_necessary(mcs->backedge_counter()); - } - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - set_carry_if_necessary(mdo->invocation_counter()); - set_carry_if_necessary(mdo->backedge_counter()); - } -} - -// Called with the queue locked and with at least one element -CompileTask* TieredThresholdPolicy::select_task(CompileQueue* compile_queue) { - CompileTask *max_blocking_task = NULL; - CompileTask *max_task = NULL; - Method* max_method = NULL; - jlong t = os::javaTimeMillis(); - // Iterate through the queue and find a method with a maximum rate. - for (CompileTask* task = compile_queue->first(); task != NULL;) { - CompileTask* next_task = task->next(); - Method* method = task->method(); - // If a method was unloaded or has been stale for some time, remove it from the queue. - // Blocking tasks and tasks submitted from whitebox API don't become stale - if (task->is_unloaded() || (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method))) { - if (!task->is_unloaded()) { - if (PrintTieredEvents) { - print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level()); - } - method->clear_queued_for_compilation(); - } - compile_queue->remove_and_mark_stale(task); - task = next_task; - continue; - } - update_rate(t, method); - if (max_task == NULL || compare_methods(method, max_method)) { - // Select a method with the highest rate - max_task = task; - max_method = method; - } - - if (task->is_blocking()) { - if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) { - max_blocking_task = task; - } - } - - task = next_task; - } - - if (max_blocking_task != NULL) { - // In blocking compilation mode, the CompileBroker will make - // compilations submitted by a JVMCI compiler thread non-blocking. These - // compilations should be scheduled after all blocking compilations - // to service non-compiler related compilations sooner and reduce the - // chance of such compilations timing out. 
- max_task = max_blocking_task; - max_method = max_task->method(); - } - - if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile && - TieredStopAtLevel > CompLevel_full_profile && - max_method != NULL && is_method_profiled(max_method)) { - max_task->set_comp_level(CompLevel_limited_profile); - - if (CompileBroker::compilation_is_complete(max_method, max_task->osr_bci(), CompLevel_limited_profile)) { - if (PrintTieredEvents) { - print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); - } - compile_queue->remove_and_mark_stale(max_task); - max_method->clear_queued_for_compilation(); - return NULL; - } - - if (PrintTieredEvents) { - print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); - } - } - - return max_task; -} - -void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { - for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) { - if (PrintTieredEvents) { - methodHandle mh(sd->method()); - print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none); - } - MethodData* mdo = sd->method()->method_data(); - if (mdo != NULL) { - mdo->reset_start_counters(); - } - if (sd->is_top()) break; - } -} - -nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee, - int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) { - if (comp_level == CompLevel_none && - JvmtiExport::can_post_interpreter_events() && - thread->is_interp_only_mode()) { - return NULL; - } - if (ReplayCompiles) { - // Don't trigger other compiles in testing mode - return NULL; - } - - handle_counter_overflow(method()); - if (method() != inlinee()) { - handle_counter_overflow(inlinee()); - } - - if (PrintTieredEvents) { - print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level); - } - - if (bci == InvocationEntryBci) { - method_invocation_event(method, inlinee, comp_level, nm, thread); - } else { - // method == inlinee if the event originated in the main method - method_back_branch_event(method, inlinee, bci, comp_level, nm, thread); - // Check if the event led to a higher-level OSR compilation - CompLevel expected_comp_level = comp_level; - if (inlinee->is_not_osr_compilable(expected_comp_level)) { - // It's not possible to reach the expected level so fall back to simple.
- expected_comp_level = CompLevel_simple; - } - nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false); - assert(osr_nm == NULL || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken"); - if (osr_nm != NULL) { - // Perform OSR with new nmethod - return osr_nm; - } - } - return NULL; -} - -// Check if the method can be compiled, change level if necessary -void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { - assert(level <= TieredStopAtLevel, "Invalid compilation level"); - if (level == CompLevel_none) { - return; - } - if (level == CompLevel_aot) { - if (mh->has_aot_code()) { - if (PrintTieredEvents) { - print_event(COMPILE, mh, mh, bci, level); - } - MutexLocker ml(Compile_lock); - NoSafepointVerifier nsv; - if (mh->has_aot_code() && mh->code() != mh->aot_code()) { - mh->aot_code()->make_entrant(); - if (mh->has_compiled_code()) { - mh->code()->make_not_entrant(); - } - MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); - Method::set_code(mh, mh->aot_code()); - } - } - return; - } - - // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling - // in the interpreter and then compile with C2 (the transition function will request that, - // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with - // pure C1. - if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) { - if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) { - compile(mh, bci, CompLevel_simple, thread); - } - return; - } - if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) { - if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) { - nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false); - if (osr_nm != NULL && osr_nm->comp_level() > CompLevel_simple) { - // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted. - osr_nm->make_not_entrant(); - } - compile(mh, bci, CompLevel_simple, thread); - } - return; - } - if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) { - return; - } - if (!CompileBroker::compilation_is_in_queue(mh)) { - if (PrintTieredEvents) { - print_event(COMPILE, mh, mh, bci, level); - } - submit_compile(mh, bci, level, thread); - } -} - -// Update the rate and submit compile -void TieredThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { - int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count(); - update_rate(os::javaTimeMillis(), mh()); - CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread); -} - -// Print an event. -void TieredThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level) { - tty->print(" rate="); - if (mh->prev_time() == 0) tty->print("n/a"); - else tty->print("%f", mh->rate()); - - tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback), - threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback)); - -} - -// update_rate() is called from select_task() while holding a compile queue lock. -void TieredThresholdPolicy::update_rate(jlong t, Method* m) { - // Skip update if counters are absent. - // Can't allocate them since we are holding compile queue lock. 
- if (m->method_counters() == NULL) return; - - if (is_old(m)) { - // We don't remove old methods from the queue, - // so we can just zero the rate. - m->set_rate(0); - return; - } - - // We don't update the rate if we've just come out of a safepoint. - // delta_s is the time since last safepoint in milliseconds. - jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms(); - jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement - // How many events were there since the last time? - int event_count = m->invocation_count() + m->backedge_count(); - int delta_e = event_count - m->prev_event_count(); - - // We should be running for at least 1ms. - if (delta_s >= TieredRateUpdateMinTime) { - // And we must've taken the previous point at least 1ms before. - if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { - m->set_prev_time(t); - m->set_prev_event_count(event_count); - m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond - } else { - if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { - // If nothing happened for 25ms, zero the rate. Don't modify prev values. - m->set_rate(0); - } - } - } -} - -// Check if this method has been stale for a given number of milliseconds. -// See select_task(). -bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) { - jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms(); - jlong delta_t = t - m->prev_time(); - if (delta_t > timeout && delta_s > timeout) { - int event_count = m->invocation_count() + m->backedge_count(); - int delta_e = event_count - m->prev_event_count(); - // Return true if there were no events. - return delta_e == 0; - } - return false; -} - -// We don't remove old methods from the compile queue even if they have -// very low activity. See select_task(). -bool TieredThresholdPolicy::is_old(Method* method) { - return method->invocation_count() > 50000 || method->backedge_count() > 500000; -} - -double TieredThresholdPolicy::weight(Method* method) { - return (double)(method->rate() + 1) * - (method->invocation_count() + 1) * (method->backedge_count() + 1); -} - -// Apply heuristics and return true if x should be compiled before y -bool TieredThresholdPolicy::compare_methods(Method* x, Method* y) { - if (x->highest_comp_level() > y->highest_comp_level()) { - // recompilation after deopt - return true; - } else - if (x->highest_comp_level() == y->highest_comp_level()) { - if (weight(x) > weight(y)) { - return true; - } - } - return false; -} - -// Is method profiled enough? -bool TieredThresholdPolicy::is_method_profiled(Method* method) { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - int i = mdo->invocation_count_delta(); - int b = mdo->backedge_count_delta(); - return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method); - } - return false; -} - -double TieredThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) { - double queue_size = CompileBroker::queue_size(level); - int comp_count = compiler_count(level); - double k = queue_size / (feedback_k * comp_count) + 1; - - // Increase C1 compile threshold when the code cache is filled more - // than specified by IncreaseFirstTierCompileThresholdAt percentage. - // The main intention is to keep enough free space for C2 compiled code - // to achieve peak performance if the code cache is under stress.
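A worked reading of the queue-feedback formula just above, using the stock -XX:Tier3LoadFeedback=5 default and invented queue numbers:

  // 20 tasks queued at the C1 level served by 2 compiler threads:
  //   k = 20 / (5 * 2) + 1 = 3
  // so the Tier3 thresholds are effectively tripled while the queue stays
  // this long, and k decays back toward 1 as the queue drains.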
- if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) { - double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level)); - if (current_reverse_free_ratio > _increase_threshold_at_ratio) { - k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio); - } - } - return k; -} - -// Call and loop predicates determine whether a transition to a higher -// compilation level should be performed (pointers to predicate functions -// are passed to common()). -// Tier?LoadFeedback is basically a coefficient that determines -// how many methods per compiler thread can be in the queue before -// the threshold values double. -bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) { - switch(cur_level) { - case CompLevel_aot: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return loop_predicate_helper<CompLevel_aot>(i, b, k, method); - } - case CompLevel_none: - case CompLevel_limited_profile: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return loop_predicate_helper<CompLevel_none>(i, b, k, method); - } - case CompLevel_full_profile: { - double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); - return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method); - } - default: - return true; - } -} - -bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) { - switch(cur_level) { - case CompLevel_aot: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return call_predicate_helper<CompLevel_aot>(i, b, k, method); - } - case CompLevel_none: - case CompLevel_limited_profile: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return call_predicate_helper<CompLevel_none>(i, b, k, method); - } - case CompLevel_full_profile: { - double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); - return call_predicate_helper<CompLevel_full_profile>(i, b, k, method); - } - default: - return true; - } -} - -// Determine if a method is mature. -bool TieredThresholdPolicy::is_mature(Method* method) { - if (should_compile_at_level_simple(method)) return true; - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - int i = mdo->invocation_count(); - int b = mdo->backedge_count(); - double k = ProfileMaturityPercentage / 100.0; - return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) || - loop_predicate_helper<CompLevel_full_profile>(i, b, k, method); - } - return false; -} - -// If a method is old enough and is still in the interpreter we would want to -// start profiling without waiting for the compiled method to arrive. -// We also take the load on compilers into account. -bool TieredThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) { - if (cur_level == CompLevel_none && - CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - int i = method->invocation_count(); - int b = method->backedge_count(); - double k = Tier0ProfilingStartPercentage / 100.0; - return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method); - } - return false; -}
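As a concrete reading of the call predicate these functions drive, take the Tier3 case with the stock defaults (-XX:Tier3InvocationThreshold=200, -XX:Tier3MinInvocationThreshold=100, -XX:Tier3CompileThreshold=2000) and scale k = 1; the counts are invented for illustration:

  // A method qualifies for a level 3 compile once i >= 200 * k, or once
  // i >= 100 * k with i + b >= 2000 * k. So i=150, b=0 does not qualify,
  // while a loopy method with i=150, b=1900 does: 150 >= 100 and
  // 150 + 1900 = 2050 >= 2000. Doubling k through queue feedback doubles
  // all three cutoffs.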
-bool TieredThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) { - CompLevel comp_level = (CompLevel)env->comp_level(); - if (comp_level == CompLevel_full_profile || - comp_level == CompLevel_limited_profile) { - return callee->highest_osr_comp_level() == CompLevel_full_optimization; - } - return false; -} - -// Create MDO if necessary. -void TieredThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) { - if (mh->is_native() || - mh->is_abstract() || - mh->is_accessor() || - mh->is_constant_getter()) { - return; - } - if (mh->method_data() == NULL) { - Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR); - } -} - - -/* - * Method states: - * 0 - interpreter (CompLevel_none) - * 1 - pure C1 (CompLevel_simple) - * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile) - * 3 - C1 with full profiling (CompLevel_full_profile) - * 4 - C2 (CompLevel_full_optimization) - * - * Common state transition patterns: - * a. 0 -> 3 -> 4. - * The most common path. But note that even in this straightforward case - * profiling can start at level 0 and finish at level 3. - * - * b. 0 -> 2 -> 3 -> 4. - * This case occurs when the load on C2 is deemed too high. So, instead of transitioning - * into state 3 directly and over-profiling while a method is in the C2 queue we transition to - * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. - * - * c. 0 -> (3->2) -> 4. - * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough - * to enable the profiling to fully occur at level 0. In this case we change the compilation level - * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster - * without full profiling while C2 is compiling. - * - * d. 0 -> 3 -> 1 or 0 -> 2 -> 1. - * After a method was once compiled with C1 it can be identified as trivial and be compiled to - * level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1. - * - * e. 0 -> 4. - * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter) - * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because - * the compiled version already exists). - * - * Note that since state 0 can be reached from any other state via deoptimization, different loops - * are possible. - * - */ - -// Common transition function. Given a predicate, determines if a method should transition to another level. -CompLevel TieredThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) { - CompLevel next_level = cur_level; - int i = method->invocation_count(); - int b = method->backedge_count(); - - if (should_compile_at_level_simple(method)) { - next_level = CompLevel_simple; - } else { - switch(cur_level) { - default: break; - case CompLevel_aot: { - // If we were at full profile level, would we switch to full opt? - if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { - next_level = CompLevel_full_optimization; - } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } - break; - case CompLevel_none: - // If we were at full profile level, would we switch to full opt?
- if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { - next_level = CompLevel_full_optimization; - } else if ((this->*p)(i, b, cur_level, method)) { -#if INCLUDE_JVMCI - if (EnableJVMCI && UseJVMCICompiler) { - // Since JVMCI takes a while to warm up, its queue inevitably backs up during - // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root - // compilation method and all potential inlinees have mature profiles (which - // includes type profiling). If it sees immature profiles, JVMCI's inliner - // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to - // exploring/inlining too many graphs). Since a rewrite of the inliner is - // in progress, we simply disable the dialing-back heuristic for now and will - // revisit this decision once the new inliner is completed. - next_level = CompLevel_full_profile; - } else -#endif - { - // C1-generated fully profiled code is about 30% slower than the limited profile - // code that has only invocation and backedge counters. The observation is that - // if the C2 queue is large enough we can spend too much time in the fully profiled code - // while waiting for C2 to pick the method from the queue. To alleviate this problem - // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long - // we choose to compile a limited profiled version and then recompile with full profiling - // when the load on C2 goes down. - if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - next_level = CompLevel_limited_profile; - } else { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_limited_profile: - if (is_method_profiled(method)) { - // Special case: we got here because this method was fully profiled in the interpreter. - next_level = CompLevel_full_optimization; - } else { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - if (mdo->would_profile()) { - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } else { - next_level = CompLevel_full_optimization; - } - } else { - // If there is no MDO we need to profile - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_full_profile: - { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - if (mdo->would_profile()) { - int mdo_i = mdo->invocation_count_delta(); - int mdo_b = mdo->backedge_count_delta(); - if ((this->*p)(mdo_i, mdo_b, cur_level, method)) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = CompLevel_full_optimization; - } - } - } - break; - } - } - return MIN2(next_level, (CompLevel)TieredStopAtLevel); -}
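The heart of the CompLevel_none branch above is the queue-size feedback; a minimal standalone sketch (not the policy code itself; Tier3DelayOn's usual default of 5 is assumed) of how a congested C2 queue steers a method to level 2 instead of level 3:

    #include <cstdio>

    enum CompLevel { CompLevel_limited_profile = 2, CompLevel_full_profile = 3 };

    // Profile fully (level 3) unless C2 is backed up, in which case run the
    // faster, counter-only level 2 code until the load recedes.
    static CompLevel pick_profile_level(int c2_queue_size, int c2_compiler_count,
                                        int tier3_delay_on = 5) {
      if (c2_queue_size > tier3_delay_on * c2_compiler_count) {
        return CompLevel_limited_profile;
      }
      return CompLevel_full_profile;
    }

    int main() {
      std::printf("%d\n", pick_profile_level(4, 2));  // 3: queue short enough
      std::printf("%d\n", pick_profile_level(16, 2)); // 2: 16 > 5 * 2, delay profiling
      return 0;
    }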
-// Determine if a method should be compiled with a normal entry point at a different level. -CompLevel TieredThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) { - CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), - common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true)); - CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level); - - // If OSR method level is greater than the regular method level, the levels should be - // equalized by raising the regular method level in order to avoid OSRs during each - // invocation of the method. - if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) { - MethodData* mdo = method->method_data(); - guarantee(mdo != NULL, "MDO should not be NULL"); - if (mdo->invocation_count() >= 1) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = MAX2(osr_level, next_level); - } - return next_level; -} - -// Determine if we should do an OSR compilation of a given method. -CompLevel TieredThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) { - CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true); - if (cur_level == CompLevel_none) { - // If there is a live OSR method, it means we deopted to the interpreter - // for the transition. - CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level); - if (osr_level > CompLevel_none) { - return osr_level; - } - } - return next_level; -} - -bool TieredThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) { - if (UseAOT) { - if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) { - // If the current level is full profile or interpreter and we're switching to any other level, - // reactivate the AOT code first so that we won't waste time overprofiling. - compile(mh, InvocationEntryBci, CompLevel_aot, thread); - // Fall through for JIT compilation. - } - if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) { - // If the next level is limited profile, use the aot code (if there is any), - // since it's essentially the same thing. - compile(mh, InvocationEntryBci, CompLevel_aot, thread); - // No need to JIT, we're done. - return true; - } - } - return false; -} - - -// Handle the invocation event. -void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, - CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (should_create_mdo(mh(), level)) { - create_mdo(mh, thread); - } - CompLevel next_level = call_event(mh(), level, thread); - if (next_level != level) { - if (maybe_switch_to_aot(mh, level, next_level, thread)) { - // No JITting necessary - return; - } - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } -} - -// Handle the back branch event. Notice that we can compile the method -// with a regular entry from here.
-void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (should_create_mdo(mh(), level)) { - create_mdo(mh, thread); - } - // Check if MDO should be created for the inlined method - if (should_create_mdo(imh(), level)) { - create_mdo(imh, thread); - } - - if (is_compilation_enabled()) { - CompLevel next_osr_level = loop_event(imh(), level, thread); - CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); - // At the very least compile the OSR version - if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) { - compile(imh, bci, next_osr_level, thread); - } - - // Use loop event as an opportunity to also check if there have been - // enough calls. - CompLevel cur_level, next_level; - if (mh() != imh()) { // If there is an enclosing method - if (level == CompLevel_aot) { - // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling. - if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread); - } - } else { - // Current loop event level is not AOT - guarantee(nm != NULL, "Should have nmethod here"); - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - - if (max_osr_level == CompLevel_full_optimization) { - // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts - bool make_not_entrant = false; - if (nm->is_osr_method()) { - // This is an OSR method, just make it not entrant and recompile later if needed - make_not_entrant = true; - } else { - if (next_level != CompLevel_full_optimization) { - // next_level is not full opt, so we need to recompile the - // enclosing method without the inlinee - cur_level = CompLevel_none; - make_not_entrant = true; - } - } - if (make_not_entrant) { - if (PrintTieredEvents) { - int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci; - print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); - } - nm->make_not_entrant(); - } - } - // Fix up next_level if necessary to avoid deopts - if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { - next_level = CompLevel_full_profile; - } - if (cur_level != next_level) { - if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } - } - } else { - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - if (next_level != cur_level) { - if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } - } - } -} - -#endif diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/tieredThresholdPolicy.hpp --- a/src/hotspot/share/runtime/tieredThresholdPolicy.hpp Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,278 +0,0 @@ -/* - * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation.
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_RUNTIME_TIEREDTHRESHOLDPOLICY_HPP -#define SHARE_RUNTIME_TIEREDTHRESHOLDPOLICY_HPP - -#include "code/nmethod.hpp" -#include "oops/methodData.hpp" -#include "runtime/compilationPolicy.hpp" -#include "utilities/globalDefinitions.hpp" - -#ifdef TIERED - -class CompileTask; -class CompileQueue; -/* - * The system supports 5 execution levels: - * * level 0 - interpreter - * * level 1 - C1 with full optimization (no profiling) - * * level 2 - C1 with invocation and backedge counters - * * level 3 - C1 with full profiling (level 2 + MDO) - * * level 4 - C2 - * - * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters - * (invocation counters and backedge counters). The frequency of these notifications is - * different at each level. These notifications are used by the policy to decide what transition - * to make. - * - * Execution starts at level 0 (interpreter), then the policy can decide either to compile the - * method at level 3 or level 2. The decision is based on the following factors: - * 1. The length of the C2 queue determines the next level. The observation is that level 2 - * is generally faster than level 3 by about 30%, therefore we would want to minimize the time - * a method spends at level 3. We should only spend the time at level 3 that is necessary to get - * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to - * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile - * request makes its way through the long queue. When the load on C2 recedes we are going to - * recompile at level 3 and start gathering profiling information. - * 2. The length of the C1 queue is used to dynamically adjust the thresholds, so as to introduce - * additional filtering if the compiler is overloaded. The rationale is that by the time a - * method gets compiled it can become unused, so it doesn't make sense to put too much onto the - * queue. - * - * After profiling is completed at level 3 the transition is made to level 4. Again, the length - * of the C2 queue is used as a feedback to adjust the thresholds. - * - * After the first C1 compile some basic information is determined about the code, like the number - * of blocks and the number of loops. Based on that it can be decided that a method - * is trivial and compiling it with C1 will yield the same code. In this case the method is - * compiled at level 1 instead of 4. - * - * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of - * the code and the C2 queue is sufficiently small we can decide to start profiling in the - * interpreter (and continue profiling in the compiled code once the level 3 version arrives).
- * If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2 - * version is compiled instead in order to run faster while waiting for a level 4 version. - * - * Compile queues are implemented as priority queues - for each method in the queue we compute - * the event rate (the number of invocation and backedge counter increments per unit of time). - * When getting an element off the queue we pick the one with the largest rate. Maintaining the - * rate also allows us to remove stale methods (the ones that got on the queue but stopped - * being used shortly after that). -*/ - -/* Command line options: - * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method - * invocation and backedge notifications. Basically, every n-th invocation or backedge, a mutator thread - * makes a call into the runtime. - * - * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control - * compilation thresholds. - * Level 2 thresholds are not used and are provided for option-compatibility and potential future use. - * Other thresholds work as follows: - * - * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when - * the following predicate is true (X is the level): - * - * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s), - * - * where $i$ is the number of method invocations, $b$ is the number of backedges, and $s$ is the scaling - * coefficient that will be discussed further. - * The intuition is to equalize the time that is spent profiling each method. - * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be - * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come - * from Method* and for 3->4 transition they come from MDO (since profiled invocations are - * counted separately). Finally, if a method does not contain anything worth profiling, a transition - * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than - * what is specified by Tier4InvocationThreshold). - * - * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates. - * - * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending - * on the compiler load. The scaling coefficients are computed as follows: - * - * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1, - * - * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X - * is the number of level X compiler threads. - * - * Basically these parameters describe how many methods should be in the compile queue - * per compiler thread before the scaling coefficient increases by one. - * - * This feedback provides the mechanism to automatically control the flow of compilation requests - * depending on the machine speed, mutator load and other external factors. - * - * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop. - * Consider the following observation: a method compiled with full profiling (level 3) - * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO). - * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue - * gets congested and the 3->4 transition is delayed.
While the method is in the C2 queue it continues - * executing at level 3 for a much longer time than is required by the predicate and at suboptimal speed. - * The idea is to dynamically change the behavior of the system in such a way that if a substantial - * load on C2 is detected we would first do the 0->2 transition, allowing a method to run faster, - * and then, when the load decreases, allow 2->3 transitions. - * - * Tier3Delay* parameters control this switching mechanism. - * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy - * no longer does 0->3 transitions but does 0->2 transitions instead. - * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue - * per compiler thread falls below the specified amount. - * The hysteresis is necessary to avoid jitter. - * - * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue. - * Basically, since we use the event rate d(i + b)/dt as the priority value when selecting a method to - * compile from the compile queue, we can also detect stale methods for which the rate has been - * 0 for some time in the same iteration. Stale methods can appear in the queue when an application - * abruptly changes its behavior. - * - * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking - * to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything - * with pure C1. - * - * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the - * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the - * method is still not ready. We can even go directly from level 0 to 4 if C1 doesn't produce a compiled - * version in time. This speeds up the overall transition to level 4 and decreases the startup time. - * Note that this behavior is also guarded by the Tier3Delay mechanism: when the C2 queue is too long - * there is no reason to start profiling prematurely. - * - * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation. - * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered - * to be zero if no events occurred in TieredRateUpdateMaxTime. - */ - -class TieredThresholdPolicy : public CompilationPolicy { - jlong _start_time; - int _c1_count, _c2_count; - - // Check if the counter is big enough and set carry (effectively infinity). - inline void set_carry_if_necessary(InvocationCounter *counter); - // Set carry flags in the counters (in Method* and MDO). - inline void handle_counter_overflow(Method* method); - // Call and loop predicates determine whether a transition to a higher compilation - // level should be performed (pointers to predicate functions are passed to common()). - // Predicates also take compiler load into account. - typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method); - bool call_predicate(int i, int b, CompLevel cur_level, Method* method); - bool loop_predicate(int i, int b, CompLevel cur_level, Method* method); - // Common transition function. Given a predicate, determines if a method should transition to another level. - CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
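To make the predicate and the scaling coefficient documented above concrete, here is a minimal standalone sketch of the 0->3 decision, assuming the commonly cited Tier3 defaults (InvocationThreshold 200, MinInvocationThreshold 100, CompileThreshold 2000, LoadFeedback 5; the authoritative values live in the VM's flag definitions):

    #include <cstdio>

    static bool tier3_call_predicate(int i, int b, double queue_size, int compiler_count) {
      const int    kInvocationThreshold    = 200;  // assumed defaults, see above
      const int    kMinInvocationThreshold = 100;
      const int    kCompileThreshold       = 2000;
      const double kLoadFeedback           = 5.0;
      double s = queue_size / (kLoadFeedback * compiler_count) + 1; // scaling coefficient
      return i > kInvocationThreshold * s ||
             (i > kMinInvocationThreshold * s && i + b > kCompileThreshold * s);
    }

    int main() {
      // Idle C1 queue: s = 1, so 250 invocations alone trigger the transition.
      std::printf("%d\n", tier3_call_predicate(250, 0, /*queue*/ 0.0, /*threads*/ 1));
      // Backed-up queue: s = 10/5 + 1 = 3, and the same method no longer qualifies.
      std::printf("%d\n", tier3_call_predicate(250, 0, /*queue*/ 10.0, /*threads*/ 1));
      return 0;
    }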
- // Transition functions. - // call_event determines if a method should be compiled at a different - // level with a regular invocation entry. - CompLevel call_event(Method* method, CompLevel cur_level, JavaThread* thread); - // loop_event checks if a method should be OSR compiled at a different - // level. - CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread); - void print_counters(const char* prefix, const methodHandle& mh); - // Has a method been around long? - // We don't remove old methods from the compile queue even if they have - // very low activity (see select_task()). - inline bool is_old(Method* method); - // Was a given method inactive for a given number of milliseconds? - // If it was, we would remove it from the queue (see select_task()). - inline bool is_stale(jlong t, jlong timeout, Method* m); - // Compute the weight of the method for the compilation scheduling - inline double weight(Method* method); - // Apply heuristics and return true if x should be compiled before y - inline bool compare_methods(Method* x, Method* y); - // Compute event rate for a given method. The rate is the number of events (invocations + backedges) - // per millisecond. - inline void update_rate(jlong t, Method* m); - // Compute threshold scaling coefficient - inline double threshold_scale(CompLevel level, int feedback_k); - // If a method is old enough and is still in the interpreter we would want to - // start profiling without waiting for the compiled method to arrive. This function - // determines whether we should do that. - inline bool should_create_mdo(Method* method, CompLevel cur_level); - // Create MDO if necessary. - void create_mdo(const methodHandle& mh, JavaThread* thread); - // Is method profiled enough? - bool is_method_profiled(Method* method); - - double _increase_threshold_at_ratio; - - bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread); - - int c1_count() const { return _c1_count; } - int c2_count() const { return _c2_count; } - void set_c1_count(int x) { _c1_count = x; } - void set_c2_count(int x) { _c2_count = x; } - - enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; - void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); - // Print policy-specific information if necessary - void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); - // Check if the method can be compiled, change level if necessary - void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); - // Submit a given method for compilation - void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); - // Simple methods are as good compiled with C1 as with C2. - // This function tells if a method is such a method. - inline static bool is_trivial(Method* method); - // Force method to be compiled at CompLevel_simple? - inline static bool should_compile_at_level_simple(Method* method); - - // Predicate helpers are used by .*_predicate() methods as well as others. - // They check the given counter values, multiplied by the scale, against the thresholds. - template <CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method); - template <CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method); - - // Get a compilation level for a given method.
- static CompLevel comp_level(Method* method); - void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, - CompLevel level, CompiledMethod* nm, JavaThread* thread); - void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, - int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread); - - void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } - void set_start_time(jlong t) { _start_time = t; } - jlong start_time() const { return _start_time; } - -public: - TieredThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { } - virtual int compiler_count(CompLevel comp_level) { - if (is_c1_compile(comp_level)) return c1_count(); - if (is_c2_compile(comp_level)) return c2_count(); - return 0; - } - virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); } - virtual void do_safepoint_work() { } - virtual void delay_compilation(Method* method) { } - virtual void disable_compilation(Method* method) { } - virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); - virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, - int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread); - // Select task is called by CompileBroker. We should return a task or NULL. - virtual CompileTask* select_task(CompileQueue* compile_queue); - // Tell the runtime if we think a given method is adequately profiled. - virtual bool is_mature(Method* method); - // Initialize: set compiler thread count - virtual void initialize(); - virtual bool should_not_inline(ciEnv* env, ciMethod* callee); -}; - -#endif // TIERED - -#endif // SHARE_RUNTIME_TIEREDTHRESHOLDPOLICY_HPP diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/runtime/vmStructs.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -88,6 +88,7 @@ #include "runtime/globals.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" +#include "runtime/notificationThread.hpp" #include "runtime/os.hpp" #include "runtime/perfMemory.hpp" #include "runtime/serviceThread.hpp" @@ -1366,6 +1367,7 @@ declare_type(JavaThread, Thread) \ declare_type(JvmtiAgentThread, JavaThread) \ declare_type(ServiceThread, JavaThread) \ + declare_type(NotificationThread, JavaThread) \ declare_type(CompilerThread, JavaThread) \ declare_type(CodeCacheSweeperThread, JavaThread) \ declare_toplevel_type(OSThread) \ diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/diagnosticFramework.cpp --- a/src/hotspot/share/services/diagnosticFramework.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/diagnosticFramework.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -437,9 +437,9 @@ } void DCmdFactory::push_jmx_notification_request() { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); _has_pending_jmx_notification = true; - Service_lock->notify_all(); + Notification_lock->notify_all(); } void DCmdFactory::send_notification(TRAPS) { @@ -455,7 +455,7 @@ HandleMark hm(THREAD); bool notif = false; { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); notif = _has_pending_jmx_notification; _has_pending_jmx_notification = false; } diff -r 54c1ba464b78 -r 
28c7e6711871 src/hotspot/share/services/gcNotifier.cpp --- a/src/hotspot/share/services/gcNotifier.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/gcNotifier.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -54,18 +54,18 @@ } void GCNotifier::addRequest(GCNotificationRequest *request) { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); if(first_request == NULL) { first_request = request; } else { last_request->next = request; } last_request = request; - Service_lock->notify_all(); + Notification_lock->notify_all(); } GCNotificationRequest *GCNotifier::getRequest() { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); GCNotificationRequest *request = first_request; if(first_request != NULL) { first_request = first_request->next; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/lowMemoryDetector.cpp --- a/src/hotspot/share/services/lowMemoryDetector.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/lowMemoryDetector.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -40,7 +40,7 @@ volatile jint LowMemoryDetector::_disabled_count = 0; bool LowMemoryDetector::has_pending_requests() { - assert(Service_lock->owned_by_self(), "Must own Service_lock"); + assert(Notification_lock->owned_by_self(), "Must own Notification_lock"); bool has_requests = false; int num_memory_pools = MemoryService::num_memory_pools(); for (int i = 0; i < num_memory_pools; i++) { @@ -62,7 +62,7 @@ ResourceMark rm(THREAD); HandleMark hm(THREAD); - // No need to hold Service_lock to call out to Java + // No need to hold Notification_lock to call out to Java int num_memory_pools = MemoryService::num_memory_pools(); for (int i = 0; i < num_memory_pools; i++) { MemoryPool* pool = MemoryService::get_memory_pool(i); @@ -80,7 +80,7 @@ // This method could be called from any Java threads // and also VMThread. void LowMemoryDetector::detect_low_memory() { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); bool has_pending_requests = false; int num_memory_pools = MemoryService::num_memory_pools(); @@ -98,7 +98,7 @@ } if (has_pending_requests) { - Service_lock->notify_all(); + Notification_lock->notify_all(); } } @@ -113,14 +113,14 @@ } { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); MemoryUsage usage = pool->get_memory_usage(); sensor->set_gauge_sensor_level(usage, pool->usage_threshold()); if (sensor->has_pending_requests()) { // notify sensor state update - Service_lock->notify_all(); + Notification_lock->notify_all(); } } } @@ -135,14 +135,14 @@ } { - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); MemoryUsage usage = pool->get_last_collection_usage(); sensor->set_counter_sensor_level(usage, pool->gc_usage_threshold()); if (sensor->has_pending_requests()) { // notify sensor state update - Service_lock->notify_all(); + Notification_lock->notify_all(); } } } @@ -205,7 +205,7 @@ // If the current level is between high and low threshold, no change. 
// void SensorInfo::set_gauge_sensor_level(MemoryUsage usage, ThresholdSupport* high_low_threshold) { - assert(Service_lock->owned_by_self(), "Must own Service_lock"); + assert(Notification_lock->owned_by_self(), "Must own Notification_lock"); assert(high_low_threshold->is_high_threshold_supported(), "just checking"); bool is_over_high = high_low_threshold->is_high_threshold_crossed(usage); @@ -260,7 +260,7 @@ // the sensor will be on (i.e. sensor is currently off // and has pending trigger requests). void SensorInfo::set_counter_sensor_level(MemoryUsage usage, ThresholdSupport* counter_threshold) { - assert(Service_lock->owned_by_self(), "Must own Service_lock"); + assert(Notification_lock->owned_by_self(), "Must own Notification_lock"); assert(counter_threshold->is_high_threshold_supported(), "just checking"); bool is_over_high = counter_threshold->is_high_threshold_crossed(usage); @@ -334,8 +334,8 @@ } { - // Holds Service_lock and update the sensor state - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + // Hold Notification_lock and update the sensor state + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); assert(_pending_trigger_count > 0, "Must have pending trigger"); _sensor_on = true; _sensor_count += count; @@ -345,8 +345,8 @@ void SensorInfo::clear(int count, TRAPS) { { - // Holds Service_lock and update the sensor state - MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + // Hold Notification_lock and update the sensor state + MutexLocker ml(Notification_lock, Mutex::_no_safepoint_check_flag); if (_pending_clear_count == 0) { // Bail out if we lost a race to set_*_sensor_level() which may have // reactivated the sensor in the meantime because it was triggered again. diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/lowMemoryDetector.hpp --- a/src/hotspot/share/services/lowMemoryDetector.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/lowMemoryDetector.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -59,7 +59,8 @@ // // May need to deal with hysteresis effect. // -// Memory detection code runs in the Service thread (serviceThread.hpp). +// Memory detection code runs in the Notification thread or +// ServiceThread, depending on the UseNotificationThread flag.
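The hunks above all follow the same hand-off pattern: producers update sensor state under the lock and notify, and the servicing thread sleeps on that lock until requests are pending. A generic standalone sketch (std::mutex and std::condition_variable standing in for HotSpot's Notification_lock; all names invented):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    static std::mutex              notification_lock;
    static std::condition_variable notification_cv;
    static int                     pending_requests = 0;

    void detect_low_memory() {           // producer side (any thread)
      std::lock_guard<std::mutex> ml(notification_lock);
      ++pending_requests;
      notification_cv.notify_all();      // wake the servicing thread
    }

    void notification_thread_loop() {    // servicing side
      std::unique_lock<std::mutex> ml(notification_lock);
      notification_cv.wait(ml, [] { return pending_requests > 0; });
      int to_process = pending_requests; // snapshot under the lock
      pending_requests = 0;
      ml.unlock();                       // call out to handlers without the lock
      std::printf("processing %d request(s)\n", to_process);
    }

    int main() {
      std::thread t(notification_thread_loop);
      detect_low_memory();
      t.join();
      return 0;
    }

Dropping the lock before calling out mirrors the "No need to hold Notification_lock to call out to Java" comment in the hunks above.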
class OopClosure; class MemoryPool; @@ -214,6 +215,7 @@ class LowMemoryDetector : public AllStatic { friend class LowMemoryDetectorDisabler; friend class ServiceThread; + friend class NotificationThread; private: // true if any collected heap has low memory detection enabled static volatile bool _enabled_for_collected_pools; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/management.cpp --- a/src/hotspot/share/services/management.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/management.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -44,6 +44,7 @@ #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jniHandles.inline.hpp" +#include "runtime/notificationThread.hpp" #include "runtime/os.hpp" #include "runtime/serviceThread.hpp" #include "runtime/thread.inline.hpp" @@ -148,7 +149,9 @@ void Management::initialize(TRAPS) { // Start the service thread ServiceThread::initialize(); - + if (UseNotificationThread) { + NotificationThread::initialize(); + } if (ManagementServer) { ResourceMark rm(THREAD); HandleMark hm(THREAD); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/threadIdTable.cpp --- a/src/hotspot/share/services/threadIdTable.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/threadIdTable.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -46,6 +46,7 @@ static volatile size_t _items_count = 0; volatile bool ThreadIdTable::_is_initialized = false; +volatile bool ThreadIdTable::_has_work = false; class ThreadIdTableEntry : public CHeapObj<mtInternal> { private: @@ -141,6 +142,26 @@ return (size_t)1 << _local_table->get_size_log2(Thread::current()); } +void ThreadIdTable::check_concurrent_work() { + if (_has_work) { + return; + } + + double load_factor = get_load_factor(); + // Resize if we have more items than the preferred load factor + if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { + log_debug(thread, table)("Concurrent work triggered, load factor: %g", + load_factor); + trigger_concurrent_work(); + } +} + +void ThreadIdTable::trigger_concurrent_work() { + MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); + _has_work = true; + Service_lock->notify_all(); +} + void ThreadIdTable::grow(JavaThread* jt) { ThreadIdTableHash::GrowTask gt(_local_table); if (!gt.prepare(jt)) { @@ -192,13 +213,13 @@ } }; -void ThreadIdTable::grow_if_required() { - assert(Thread::current()->is_Java_thread(),"Must be Java thread"); +void ThreadIdTable::do_concurrent_work(JavaThread* jt) { assert(_is_initialized, "Thread table is not initialized"); + _has_work = false; double load_factor = get_load_factor(); log_debug(thread, table)("Concurrent work, load factor: %g", load_factor); if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { - grow(JavaThread::current()); + grow(jt); } } @@ -215,7 +236,7 @@ // The hash table takes ownership of the ThreadTableEntry, // even if it's not inserted.
if (_local_table->insert(thread, lookup, entry)) { - grow_if_required(); + check_concurrent_work(); return java_thread; } } diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/threadIdTable.hpp --- a/src/hotspot/share/services/threadIdTable.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/threadIdTable.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -36,6 +36,7 @@ friend class ThreadIdTableConfig; static volatile bool _is_initialized; + static volatile bool _has_work; public: // Initialization @@ -47,12 +48,17 @@ static JavaThread* add_thread(jlong tid, JavaThread* thread); static bool remove_thread(jlong tid); + // Growing + static bool has_work() { return _has_work; } + static void do_concurrent_work(JavaThread* jt); + private: static void create_table(size_t size); static size_t table_size(); static double get_load_factor(); - static void grow_if_required(); + static void check_concurrent_work(); + static void trigger_concurrent_work(); static void grow(JavaThread* jt); static void item_added(); diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/services/threadService.cpp --- a/src/hotspot/share/services/threadService.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/services/threadService.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "prims/jvmtiRawMonitor.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" @@ -217,10 +218,10 @@ } else { ObjectMonitor *enter_obj = thread->current_pending_monitor(); if (enter_obj != NULL) { - // thread is trying to enter() or raw_enter() an ObjectMonitor. + // thread is trying to enter() an ObjectMonitor. obj = (oop) enter_obj->object(); + assert(obj != NULL, "ObjectMonitor should have an associated object!"); } - // If obj == NULL, then ObjectMonitor is raw which doesn't count. } Handle h(Thread::current(), obj); @@ -354,13 +355,15 @@ } } -// Find deadlocks involving object monitors and concurrent locks if concurrent_locks is true +// Find deadlocks involving raw monitors, object monitors and concurrent locks +// if concurrent_locks is true. DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); // This code was modified from the original Threads::find_deadlocks code. int globalDfn = 0, thisDfn; ObjectMonitor* waitingToLockMonitor = NULL; + JvmtiRawMonitor* waitingToLockRawMonitor = NULL; oop waitingToLockBlocker = NULL; bool blocked_on_monitor = false; JavaThread *currentThread, *previousThread; @@ -391,13 +394,30 @@ // When there is a deadlock, all the monitors involved in the dependency // cycle must be contended and heavyweight. So we only care about the // heavyweight monitor a thread is waiting to lock. - waitingToLockMonitor = (ObjectMonitor*)jt->current_pending_monitor(); + waitingToLockMonitor = jt->current_pending_monitor(); + // JVM TI raw monitors can also be involved in deadlocks, and we can be + // waiting to lock both a raw monitor and ObjectMonitor at the same time. + // It isn't clear how to make deadlock detection work correctly if that + // happens. 
+ waitingToLockRawMonitor = jt->current_pending_raw_monitor(); + if (concurrent_locks) { waitingToLockBlocker = jt->current_park_blocker(); } - while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) { + + while (waitingToLockMonitor != NULL || + waitingToLockRawMonitor != NULL || + waitingToLockBlocker != NULL) { cycle->add_thread(currentThread); - if (waitingToLockMonitor != NULL) { + // Give preference to the raw monitor + if (waitingToLockRawMonitor != NULL) { + Thread* owner = waitingToLockRawMonitor->owner(); + if (owner != NULL && // the raw monitor could be released at any time + owner->is_Java_thread()) { + // only JavaThreads can be reported here + currentThread = (JavaThread*) owner; + } + } else if (waitingToLockMonitor != NULL) { address currentOwner = (address)waitingToLockMonitor->owner(); if (currentOwner != NULL) { currentThread = Threads::owning_thread_from_monitor_owner(t_list, @@ -948,28 +968,44 @@ JavaThread* currentThread; ObjectMonitor* waitingToLockMonitor; + JvmtiRawMonitor* waitingToLockRawMonitor; oop waitingToLockBlocker; int len = _threads->length(); for (int i = 0; i < len; i++) { currentThread = _threads->at(i); - waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor(); + waitingToLockMonitor = currentThread->current_pending_monitor(); + waitingToLockRawMonitor = currentThread->current_pending_raw_monitor(); waitingToLockBlocker = currentThread->current_park_blocker(); st->cr(); st->print_cr("\"%s\":", currentThread->get_thread_name()); const char* owner_desc = ",\n which is held by"; + + // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor + // sets the current pending monitor, it is possible to then see a pending raw monitor as well. + if (waitingToLockRawMonitor != NULL) { + st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor)); + Thread* owner = waitingToLockRawMonitor->owner(); + // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread + if (owner != NULL) { + if (owner->is_Java_thread()) { + currentThread = (JavaThread*) owner; + st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name()); + } else { + st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner)); + } + } else { + st->print_cr(",\n which has now been released"); + } + } + if (waitingToLockMonitor != NULL) { st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor)); oop obj = (oop)waitingToLockMonitor->object(); - if (obj != NULL) { - st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj), - obj->klass()->external_name()); + st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj), + obj->klass()->external_name()); - if (!currentThread->current_pending_monitor_is_from_java()) { - owner_desc = "\n in JNI, which is held by"; - } - } else { - // No Java object associated - a JVMTI raw monitor - owner_desc = " (JVMTI raw monitor),\n which is held by"; + if (!currentThread->current_pending_monitor_is_from_java()) { + owner_desc = "\n in JNI, which is held by"; } currentThread = Threads::owning_thread_from_monitor_owner(t_list, (address)waitingToLockMonitor->owner()); @@ -978,7 +1014,7 @@ // that owns waitingToLockMonitor should be findable, but // if it is not findable, then the previous currentThread is // blocked permanently.
- st->print("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc, + st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc, p2i(waitingToLockMonitor->owner())); continue; } @@ -992,11 +1028,10 @@ currentThread = java_lang_Thread::thread(ownerObj); assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL"); } - st->print("%s \"%s\"", owner_desc, currentThread->get_thread_name()); + st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name()); } st->cr(); - st->cr(); // Print stack traces bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace; diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/utilities/decoder.cpp --- a/src/hotspot/share/utilities/decoder.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/utilities/decoder.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -84,7 +84,7 @@ } bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) { - bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; + bool error_handling_thread = os::current_thread_id() == VMError::get_first_error_tid(); if (error_handling_thread) { return get_error_handler_instance()->decode(addr, buf, buflen, offset, modulepath, demangle); } else { @@ -95,7 +95,7 @@ } bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const void* base) { - bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; + bool error_handling_thread = os::current_thread_id() == VMError::get_first_error_tid(); if (error_handling_thread) { return get_error_handler_instance()->decode(addr, buf, buflen, offset, base); } else { @@ -106,7 +106,7 @@ bool Decoder::demangle(const char* symbol, char* buf, int buflen) { - bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid; + bool error_handling_thread = os::current_thread_id() == VMError::get_first_error_tid(); if (error_handling_thread) { return get_error_handler_instance()->demangle(symbol, buf, buflen); } else { diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/utilities/vmError.cpp --- a/src/hotspot/share/utilities/vmError.cpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/utilities/vmError.cpp Wed Oct 16 15:31:05 2019 +0200 @@ -1205,7 +1205,7 @@ st->print_cr("END."); } -volatile intptr_t VMError::first_error_tid = -1; +volatile intptr_t VMError::_first_error_tid = -1; /** Expand a pattern into a buffer starting at pos and open a file using constructed path */ static int expand_and_open(const char* pattern, bool overwrite_existing, char* buf, size_t buflen, size_t pos) { @@ -1355,8 +1355,8 @@ os::abort(CreateCoredumpOnCrash); } intptr_t mytid = os::current_thread_id(); - if (first_error_tid == -1 && - Atomic::cmpxchg(mytid, &first_error_tid, (intptr_t)-1) == -1) { + if (_first_error_tid == -1 && + Atomic::cmpxchg(mytid, &_first_error_tid, (intptr_t)-1) == -1) { // Initialize time stamps to use the same base. out.time_stamp().update_to(1); @@ -1416,7 +1416,7 @@ // This is not the first error, see if it happened in a different thread // or in the same thread during error reporting. 
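The _first_error_tid handling above is a claim-once protocol: the first thread to compare-and-swap its id over the initial -1 becomes the error reporter, and every later thread falls into the comparison in the hunk below. A standalone sketch (std::atomic standing in for HotSpot's Atomic::cmpxchg; names invented):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static std::atomic<intptr_t> first_error_tid{-1};

    // Returns true for exactly one thread; later callers see the winner's tid.
    bool try_claim_error_reporting(intptr_t mytid) {
      intptr_t expected = -1;
      return first_error_tid.compare_exchange_strong(expected, mytid);
    }

    int main() {
      std::printf("%d\n", try_claim_error_reporting(42)); // 1: claimed
      std::printf("%d\n", try_claim_error_reporting(43)); // 0: someone else won
      return 0;
    }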
- if (first_error_tid != mytid) { + if (_first_error_tid != mytid) { char msgbuf[64]; jio_snprintf(msgbuf, sizeof(msgbuf), "[thread " INTX_FORMAT " also had an error]", diff -r 54c1ba464b78 -r 28c7e6711871 src/hotspot/share/utilities/vmError.hpp --- a/src/hotspot/share/utilities/vmError.hpp Mon Oct 07 16:48:42 2019 +0200 +++ b/src/hotspot/share/utilities/vmError.hpp Wed Oct 16 15:31:05 2019 +0200 @@ -32,8 +32,6 @@ class VM_ReportJavaOutOfMemory; class VMError : public AllStatic { - friend class VM_ReportJavaOutOfMemory; - friend class Decoder; friend class VMStructs; static int _id; // Solaris/Linux signals: 0 - SIGRTMAX @@ -65,7 +63,7 @@ // Thread id of the first error. We must be able to handle native thread, // so use thread id instead of Thread* to identify thread. - static volatile intptr_t first_error_tid; + static volatile intptr_t _first_error_tid; // Core dump status, false if we have been unable to write a core/minidump for some reason static bool coredump_status; @@ -177,9 +175,9 @@ static address get_resetted_sighandler(int sig); // check to see if fatal error reporting is in progress - static bool fatal_error_in_progress() { return first_error_tid != -1; } + static bool fatal_error_in_progress() { return _first_error_tid != -1; } - static intptr_t get_first_error_tid() { return first_error_tid; } + static intptr_t get_first_error_tid() { return _first_error_tid; } // Called by the WatcherThread to check if error reporting has timed-out. // Returns true if error reporting has not completed within the ErrorLogTimeout limit. diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/io/FilePermission.java --- a/src/java.base/share/classes/java/io/FilePermission.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/io/FilePermission.java Wed Oct 16 15:31:05 2019 +0200 @@ -367,12 +367,22 @@ this.mask = mask; if (cpath.equals("<<ALL FILES>>")) { + allFiles = true; directory = true; recursive = true; cpath = ""; return; } + // Validate path by platform's default file system + try { + String name = cpath.endsWith("*") ? cpath.substring(0, cpath.length() - 1) + "-" : cpath; + builtInFS.getPath(new File(name).getPath()); + } catch (InvalidPathException ipe) { + invalid = true; + return; + } + // store only the canonical cpath if possible cpath = AccessController.doPrivileged(new PrivilegedAction<>() { public String run() { @@ -463,6 +473,9 @@ *
<p>
* The default value of the {@code jdk.io.permissionsUseCanonicalPath} * system property is {@code false} in this implementation. + *
<p>
+ * The value can also be set with a security property using the same name, + * but setting a system property will override the security property value. * * @param path the pathname of the file/directory. * @param actions the action string. @@ -573,19 +586,19 @@ * @return the effective mask */ boolean impliesIgnoreMask(FilePermission that) { + if (this == that) { + return true; + } + if (allFiles) { + return true; + } + if (this.invalid || that.invalid) { + return false; + } + if (that.allFiles) { + return false; + } if (FilePermCompat.nb) { - if (this == that) { - return true; - } - if (allFiles) { - return true; - } - if (this.invalid || that.invalid) { - return false; - } - if (that.allFiles) { - return false; - } // Left at least same level of wildness as right if ((this.recursive && that.recursive) != that.recursive || (this.directory && that.directory) != that.directory) { @@ -783,10 +796,10 @@ FilePermission that = (FilePermission) obj; + if (this.invalid || that.invalid) { + return false; + } if (FilePermCompat.nb) { - if (this.invalid || that.invalid) { - return false; - } return (this.mask == that.mask) && (this.allFiles == that.allFiles) && this.npath.equals(that.npath) && @@ -795,6 +808,7 @@ (this.recursive == that.recursive); } else { return (this.mask == that.mask) && + (this.allFiles == that.allFiles) && this.cpath.equals(that.cpath) && (this.directory == that.directory) && (this.recursive == that.recursive); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/ClassLoader.java --- a/src/java.base/share/classes/java/lang/ClassLoader.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/ClassLoader.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2004,6 +2005,17 @@ return scl; } + /* + * Initialize default paths for native libraries search. + * Must be done early as JDK may load libraries during bootstrap. + * + * @see java.lang.System#initPhase1 + */ + static void initLibraryPaths() { + usr_paths = initializePath("java.library.path"); + sys_paths = initializePath("sun.boot.library.path"); + } + // Returns true if the specified class loader can be found in this class // loader's delegation chain. boolean isAncestor(ClassLoader cl) { @@ -2473,8 +2485,7 @@ * * We use a static stack to hold the list of libraries we are * loading because this can happen only when called by the - * same thread because Runtime.load and Runtime.loadLibrary - * are synchronous. + * same thread because this block is synchronous. * * If there is a pending load operation for the library, we * immediately return success; otherwise, we raise @@ -2619,10 +2630,9 @@ boolean isAbsolute) { ClassLoader loader = (fromClass == null) ? 
null : fromClass.getClassLoader(); - if (sys_paths == null) { - usr_paths = initializePath("java.library.path"); - sys_paths = initializePath("sun.boot.library.path"); - } + assert sys_paths != null : "should be initialized at this point"; + assert usr_paths != null : "should be initialized at this point"; + if (isAbsolute) { if (loadLibrary0(fromClass, new File(name))) { return; } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/Runtime.java --- a/src/java.base/share/classes/java/lang/Runtime.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/Runtime.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,6 @@ /* * Copyright (c) 1995, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -732,7 +733,7 @@ load0(Reflection.getCallerClass(), filename); } - synchronized void load0(Class<?> fromClass, String filename) { + void load0(Class<?> fromClass, String filename) { SecurityManager security = System.getSecurityManager(); if (security != null) { security.checkLink(filename); } @@ -794,14 +795,14 @@ loadLibrary0(Reflection.getCallerClass(), libname); } - synchronized void loadLibrary0(Class<?> fromClass, String libname) { + void loadLibrary0(Class<?> fromClass, String libname) { SecurityManager security = System.getSecurityManager(); if (security != null) { security.checkLink(libname); } if (libname.indexOf((int)File.separatorChar) != -1) { throw new UnsatisfiedLinkError( - "Directory separator should not appear in library name: " + libname); + "Directory separator should not appear in library name: " + libname); } ClassLoader.loadLibrary(fromClass, libname, false); } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/StringCoding.java --- a/src/java.base/share/classes/java/lang/StringCoding.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/StringCoding.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -191,6 +191,12 @@ return result.with(StringLatin1.inflate(ba, off, len), UTF16); } } + // fastpath for always Latin1 decodable single byte + if (COMPACT_STRINGS && cd instanceof ArrayDecoder && ((ArrayDecoder)cd).isLatin1Decodable()) { + byte[] dst = new byte[len]; + ((ArrayDecoder)cd).decodeToLatin1(ba, off, len, dst); + return result.with(dst, LATIN1); + } int en = scale(len, cd.maxCharsPerByte()); char[] ca = new char[en]; if (cd instanceof ArrayDecoder) { @@ -278,6 +284,13 @@ ((ArrayDecoder)cd).isASCIICompatible() && !hasNegatives(ba, off, len)) { return decodeLatin1(ba, off, len); } + // fastpath for always Latin1 decodable single byte + if (COMPACT_STRINGS && cd instanceof ArrayDecoder && ((ArrayDecoder)cd).isLatin1Decodable()) { + byte[] dst = new byte[len]; + ((ArrayDecoder)cd).decodeToLatin1(ba, off, len, dst); + return new Result().with(dst, LATIN1); + } + int en = scale(len, cd.maxCharsPerByte()); if (len == 0) { return new Result().with(); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/System.java --- a/src/java.base/share/classes/java/lang/System.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/System.java Wed Oct 16 15:31:05 2019 +0200 @@ -2045,6 +2045,8 @@ // register shared secrets setJavaLangAccess(); + ClassLoader.initLibraryPaths(); + // Subsystems that are invoked during initialization can invoke // VM.isBooted() in order to avoid doing things that should // wait until the VM is fully initialized. The initialization level diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/Throwable.java --- a/src/java.base/share/classes/java/lang/Throwable.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/Throwable.java Wed Oct 16 15:31:05 2019 +0200 @@ -230,6 +230,7 @@ * @serial * @since 1.7 */ + @SuppressWarnings("serial") // Not statically typed as Serializable private List suppressedExceptions = SUPPRESSED_SENTINEL; /** Message for trying to suppress a null exception. */ diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/invoke/SerializedLambda.java --- a/src/java.base/share/classes/java/lang/invoke/SerializedLambda.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/invoke/SerializedLambda.java Wed Oct 16 15:31:05 2019 +0200 @@ -76,6 +76,7 @@ private final String implMethodSignature; private final int implMethodKind; private final String instantiatedMethodType; + @SuppressWarnings("serial") // Not statically typed as Serializable private final Object[] capturedArgs; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/lang/reflect/Proxy.java --- a/src/java.base/share/classes/java/lang/reflect/Proxy.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/lang/reflect/Proxy.java Wed Oct 16 15:31:05 2019 +0200 @@ -308,6 +308,7 @@ * the invocation handler for this proxy instance. * @serial */ + @SuppressWarnings("serial") // Not statically typed as Serializable protected InvocationHandler h; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/net/DatagramSocket.java --- a/src/java.base/share/classes/java/net/DatagramSocket.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/net/DatagramSocket.java Wed Oct 16 15:31:05 2019 +0200 @@ -434,14 +434,15 @@ * verify that datagrams are permitted to be sent and received * respectively. * - *
<p>
When a socket is connected, {@link #receive receive} and - * {@link #send send} will not perform any security checks - * on incoming and outgoing packets, other than matching the packet's - * and the socket's address and port. On a send operation, if the - * packet's address is set and the packet's address and the socket's - * address do not match, an {@code IllegalArgumentException} will be - * thrown. A socket connected to a multicast address may only be used - * to send packets. + *
<p>
Care should be taken to ensure that a connected datagram socket + * is not shared with untrusted code. When a socket is connected, + * {@link #receive receive} and {@link #send send} will not perform + * any security checks on incoming and outgoing packets, other than + * matching the packet's and the socket's address and port. On a send + * operation, if the packet's address is set and the packet's address + * and the socket's address do not match, an {@code IllegalArgumentException} + * will be thrown. A socket connected to a multicast address may only + * be used to send packets. * * @param address the remote address for the socket * @@ -708,9 +709,11 @@ * the length of the received message. If the message is longer than * the packet's length, the message is truncated. *
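A minimal sketch of the connected-socket behavior documented in the hunks above (class name, loopback peer and port numbers are hypothetical): once connected, a packet with no address set goes to the connected remote without per-packet security checks, while a mismatched packet address is rejected:

    import java.net.DatagramPacket;
    import java.net.DatagramSocket;
    import java.net.InetAddress;

    public class ConnectedDatagramDemo {
        public static void main(String[] args) throws Exception {
            InetAddress peer = InetAddress.getLoopbackAddress();
            try (DatagramSocket socket = new DatagramSocket()) {
                socket.connect(peer, 9999);
                byte[] buf = "ping".getBytes();
                // No address set: delivered to the connected remote.
                socket.send(new DatagramPacket(buf, buf.length));
                try {
                    // Address set, but it differs from the connected remote.
                    socket.send(new DatagramPacket(buf, buf.length, peer, 10000));
                } catch (IllegalArgumentException expected) {
                    System.out.println("rejected: " + expected.getMessage());
                }
            }
        }
    }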
<p>
- * If there is a security manager, a packet cannot be received if the - * security manager's {@code checkAccept} method - * does not allow it. + * If there is a security manager, and the socket is not currently + * connected to a remote address, a packet cannot be received if the + * security manager's {@code checkAccept} method does not allow it. + * Datagrams that are not permitted by the security manager are silently + * discarded. * * @param p the {@code DatagramPacket} into which to place * the incoming data. @@ -896,12 +899,15 @@ * * @param timeout the specified timeout in milliseconds. * @throws SocketException if there is an error in the underlying protocol, such as a UDP error. + * @throws IllegalArgumentException if {@code timeout} is negative * @since 1.1 * @see #getSoTimeout() */ public synchronized void setSoTimeout(int timeout) throws SocketException { if (isClosed()) throw new SocketException("Socket is closed"); + if (timeout < 0) + throw new IllegalArgumentException("timeout < 0"); getImpl().setOption(SocketOptions.SO_TIMEOUT, timeout); } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/net/NetPermission.java --- a/src/java.base/share/classes/java/net/NetPermission.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/net/NetPermission.java Wed Oct 16 15:31:05 2019 +0200 @@ -145,6 +145,15 @@ * * * + * setSocketImpl + * The ability to create a subclass of Socket or ServerSocket with a + * user-specified SocketImpl. + * Malicious user-defined SocketImpls can change the behavior of + * Socket and ServerSocket in surprising ways, by virtue of their + * ability to access the protected fields of SocketImpl. + * + * + * * specifyStreamHandler * The ability * to specify a stream handler when constructing a URL diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/net/ServerSocket.java --- a/src/java.base/share/classes/java/net/ServerSocket.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/net/ServerSocket.java Wed Oct 16 15:31:05 2019 +0200 @@ -32,6 +32,7 @@ import java.util.Set; import java.util.Collections; +import sun.security.util.SecurityConstants; import sun.net.PlatformSocketImpl; /** @@ -73,13 +74,25 @@ * * @throws NullPointerException if impl is {@code null}. * + * @throws SecurityException if a security manager is set and + * its {@code checkPermission} method doesn't allow + * {@code NetPermission("setSocketImpl")}. * @since 12 */ protected ServerSocket(SocketImpl impl) { Objects.requireNonNull(impl); + checkPermission(); this.impl = impl; } + private static Void checkPermission() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(SecurityConstants.SET_SOCKETIMPL_PERMISSION); + } + return null; + } + /** * Creates an unbound server socket. * diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/net/Socket.java --- a/src/java.base/share/classes/java/net/Socket.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/net/Socket.java Wed Oct 16 15:31:05 2019 +0200 @@ -25,6 +25,8 @@ package java.net; +import sun.security.util.SecurityConstants; + import java.io.InputStream; import java.io.OutputStream; import java.io.IOException; @@ -182,12 +184,28 @@ * * @throws SocketException if there is an error in the underlying protocol, * such as a TCP error.
+ * + * @throws SecurityException if {@code impl} is non-null and a security manager is set + * and its {@code checkPermission} method doesn't allow {@code NetPermission("setSocketImpl")}. + * * @since 1.1 */ protected Socket(SocketImpl impl) throws SocketException { + checkPermission(impl); this.impl = impl; } + private static Void checkPermission(SocketImpl impl) { + if (impl == null) { + return null; + } + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(SecurityConstants.SET_SOCKETIMPL_PERMISSION); + } + return null; + } + /** * Creates a stream socket and connects it to the specified port * number on the named host. diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/net/URL.java --- a/src/java.base/share/classes/java/net/URL.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/net/URL.java Wed Oct 16 15:31:05 2019 +0200 @@ -484,6 +484,16 @@ throw new MalformedURLException(s); } } + if ("jar".equalsIgnoreCase(protocol)) { + if (handler instanceof sun.net.www.protocol.jar.Handler) { + // URL.openConnection() would throw a confusing exception + // so generate a better exception here instead. + String s = ((sun.net.www.protocol.jar.Handler) handler).checkNestedProtocol(file); + if (s != null) { + throw new MalformedURLException(s); + } + } + } } /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/nio/channels/DatagramChannel.java --- a/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -265,7 +265,10 @@ * java.lang.SecurityManager#checkAccept checkAccept} and {@link * java.lang.SecurityManager#checkConnect checkConnect} methods permit * datagrams to be received from and sent to, respectively, the given - * remote address. + * remote address. Once connected, no further security checks are performed + * for datagrams received from, or sent to, the given remote address. Care + * should be taken to ensure that a connected datagram channel is not shared + * with untrusted code. * *
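A minimal sketch of the new setSocketImpl restriction (CustomSocket is hypothetical): with a security manager installed, handing a SocketImpl to the protected Socket or ServerSocket constructor now requires NetPermission("setSocketImpl"), otherwise a SecurityException is thrown:

    import java.net.Socket;
    import java.net.SocketException;
    import java.net.SocketImpl;

    public class CustomSocket extends Socket {
        public CustomSocket(SocketImpl impl) throws SocketException {
            super(impl); // runs the checkPermission shown above
        }
    }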
<p>
This method may be invoked at any time. It will not have any effect * on read or write operations that are already in progress at the moment @@ -325,6 +328,10 @@ *
<p>
If this channel's socket is not connected, or if the channel is * closed, then invoking this method has no effect.
</p>
* + * @apiNote If this method throws an IOException, the channel's socket + * may be left in an unspecified state. It is strongly recommended that + * the channel be closed when disconnect fails. + * * @return This datagram channel * * @throws IOException @@ -369,9 +376,10 @@ * to a specific remote address and a security manager has been installed * then for each datagram received this method verifies that the source's * address and port number are permitted by the security manager's {@link - * java.lang.SecurityManager#checkAccept checkAccept} method. The overhead - * of this security check can be avoided by first connecting the socket via - * the {@link #connect connect} method. + * java.lang.SecurityManager#checkAccept checkAccept} method. Datagrams + * that are not permitted by the security manager are silently discarded. + * The overhead of this security check can be avoided by first connecting + * the socket via the {@link #connect connect} method. * *
<p>
This method may be invoked at any time. If another thread has * already initiated a read operation upon this channel, however, then an @@ -401,11 +409,6 @@ * closing the channel and setting the current thread's * interrupt status * - * @throws SecurityException - * If a security manager has been installed - * and it does not permit datagrams to be accepted - * from the datagram's sender - * * @throws IOException * If some other I/O error occurs */ diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/nio/channels/SelectionKey.java --- a/src/java.base/share/classes/java/nio/channels/SelectionKey.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/nio/channels/SelectionKey.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,8 @@ package java.nio.channels; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; /** * A token representing the registration of a {@link SelectableChannel} with a @@ -428,13 +429,17 @@ // -- Attachments -- + private static final VarHandle ATTACHMENT; + static { + try { + MethodHandles.Lookup l = MethodHandles.lookup(); + ATTACHMENT = l.findVarHandle(SelectionKey.class, "attachment", Object.class); + } catch (Exception e) { + throw new InternalError(e); + } + } private volatile Object attachment; - private static final AtomicReferenceFieldUpdater - attachmentUpdater = AtomicReferenceFieldUpdater.newUpdater( - SelectionKey.class, Object.class, "attachment" - ); - /** * Attaches the given object to this key. 
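The SelectionKey hunk above replaces AtomicReferenceFieldUpdater with a VarHandle. A self-contained sketch of the same pattern (Holder is a hypothetical class, not part of this changeset):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    class Holder {
        private static final VarHandle VALUE;
        static {
            try {
                VALUE = MethodHandles.lookup()
                        .findVarHandle(Holder.class, "value", Object.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }
        private volatile Object value;

        Object swap(Object newValue) {
            // Atomic exchange, as in SelectionKey.attach above.
            return VALUE.getAndSet(this, newValue);
        }
    }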
* @@ -450,7 +455,7 @@ * otherwise {@code null} */ public final Object attach(Object ob) { - return attachmentUpdater.getAndSet(this, ob); + return ATTACHMENT.getAndSet(this, ob); } /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/nio/file/Files.java --- a/src/java.base/share/classes/java/nio/file/Files.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/nio/file/Files.java Wed Oct 16 15:31:05 2019 +0200 @@ -3550,8 +3550,8 @@ // ensure lines is not null before opening file Objects.requireNonNull(lines); CharsetEncoder encoder = cs.newEncoder(); - OutputStream out = newOutputStream(path, options); - try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out, encoder))) { + try (OutputStream out = newOutputStream(path, options); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out, encoder))) { for (CharSequence line: lines) { writer.append(line); writer.newLine(); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/security/GuardedObject.java --- a/src/java.base/share/classes/java/security/GuardedObject.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/security/GuardedObject.java Wed Oct 16 15:31:05 2019 +0200 @@ -52,7 +52,9 @@ @java.io.Serial private static final long serialVersionUID = -5240450096227834308L; + @SuppressWarnings("serial") // Not statically typed as Serializable private Object object; // the object we are guarding + @SuppressWarnings("serial") // Not statically typed as Serializable private Guard guard; // the guard /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/security/SecureRandom.java --- a/src/java.base/share/classes/java/security/SecureRandom.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/security/SecureRandom.java Wed Oct 16 15:31:05 2019 +0200 @@ -1043,6 +1043,7 @@ /** * @serial */ + @SuppressWarnings("serial") // Not statically typed as Serializable private MessageDigest digest = null; /** * @serial diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/text/DecimalFormat.java --- a/src/java.base/share/classes/java/text/DecimalFormat.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/text/DecimalFormat.java Wed Oct 16 15:31:05 2019 +0200 @@ -2756,7 +2756,10 @@ /** * Return the grouping size. Grouping size is the number of digits between * grouping separators in the integer portion of a number. For example, - * in the number "123,456.78", the grouping size is 3. + * in the number "123,456.78", the grouping size is 3. Grouping size of + * zero designates that grouping is not used, which provides the same + * formatting as if calling {@link #setGroupingUsed(boolean) + * setGroupingUsed(false)}. * * @return the grouping size * @see #setGroupingSize @@ -2770,16 +2773,28 @@ /** * Set the grouping size. Grouping size is the number of digits between * grouping separators in the integer portion of a number. For example, - * in the number "123,456.78", the grouping size is 3. - *
+ * in the number "123,456.78", the grouping size is 3. Grouping size of + * zero designates that grouping is not used, which provides the same + * formatting as if calling {@link #setGroupingUsed(boolean) + * setGroupingUsed(false)}. + *
<p>
* The value passed in is converted to a byte, which may lose information. + * Values that are negative or greater than + * {@link java.lang.Byte#MAX_VALUE Byte.MAX_VALUE} will throw an + * {@code IllegalArgumentException}. * * @param newValue the new grouping size * @see #getGroupingSize * @see java.text.NumberFormat#setGroupingUsed * @see java.text.DecimalFormatSymbols#setGroupingSeparator + * @throws IllegalArgumentException if {@code newValue} is negative or + * greater than {@link java.lang.Byte#MAX_VALUE Byte.MAX_VALUE} */ public void setGroupingSize (int newValue) { + if (newValue < 0 || newValue > Byte.MAX_VALUE) { + throw new IllegalArgumentException( + "newValue is out of valid range. value: " + newValue); + } groupingSize = (byte)newValue; fastPathCheckNeeded = true; } @@ -3906,6 +3921,12 @@ // Didn't have exponential fields useExponentialNotation = false; } + + // Restore the invariant value if groupingSize is invalid. + if (groupingSize < 0) { + groupingSize = 3; + } + serialVersionOnStream = currentSerialVersion; } @@ -4009,14 +4030,15 @@ /** * The number of digits between grouping separators in the integer - * portion of a number. Must be greater than 0 if + * portion of a number. Must be non-negative and less than or equal to + * {@link java.lang.Byte#MAX_VALUE Byte.MAX_VALUE} if + * {@code NumberFormat.groupingUsed} is true. * * @serial * @see #getGroupingSize * @see java.text.NumberFormat#isGroupingUsed */ - private byte groupingSize = 3; // invariant, > 0 if useThousands + private byte groupingSize = 3; // invariant, 0 - 127, if groupingUsed /** * If true, forces the decimal separator to always appear in a formatted diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/time/Clock.java --- a/src/java.base/share/classes/java/time/Clock.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/time/Clock.java Wed Oct 16 15:31:05 2019 +0200 @@ -641,6 +641,7 @@ static final class OffsetClock extends Clock implements Serializable { @java.io.Serial private static final long serialVersionUID = 2007484719125426256L; + @SuppressWarnings("serial") // Not statically typed as Serializable private final Clock baseClock; private final Duration offset; @@ -692,6 +693,7 @@ static final class TickClock extends Clock implements Serializable { @java.io.Serial private static final long serialVersionUID = 6504659149906368850L; + @SuppressWarnings("serial") // Not statically typed as Serializable private final Clock baseClock; private final long tickNanos; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/time/chrono/ChronoPeriodImpl.java --- a/src/java.base/share/classes/java/time/chrono/ChronoPeriodImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/time/chrono/ChronoPeriodImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -109,6 +109,7 @@ /** * The chronology. */ + @SuppressWarnings("serial") // Not statically typed as Serializable private final Chronology chrono; /** * The number of years.
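A minimal sketch of the revised setGroupingSize contract above (class name hypothetical; the output comments assume a locale whose grouping separator is a comma):

    import java.text.DecimalFormat;

    public class GroupingSizeDemo {
        public static void main(String[] args) {
            DecimalFormat df = new DecimalFormat("#,##0.##");
            System.out.println(df.format(123456.78)); // 123,456.78 (size 3)
            df.setGroupingSize(0); // same effect as setGroupingUsed(false)
            System.out.println(df.format(123456.78)); // 123456.78
            try {
                df.setGroupingSize(-1); // outside the valid 0..127 range
            } catch (IllegalArgumentException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }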
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/AbstractMap.java --- a/src/java.base/share/classes/java/util/AbstractMap.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/AbstractMap.java Wed Oct 16 15:31:05 2019 +0200 @@ -607,7 +607,9 @@ @java.io.Serial private static final long serialVersionUID = -8499721149061103585L; + @SuppressWarnings("serial") // Conditionally serializable private final K key; + @SuppressWarnings("serial") // Conditionally serializable private V value; /** @@ -738,7 +740,9 @@ @java.io.Serial private static final long serialVersionUID = 7138329143949025153L; + @SuppressWarnings("serial") // Not statically typed as Serializable private final K key; + @SuppressWarnings("serial") // Not statically typed as Serializable private final V value; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/ArrayPrefixHelpers.java --- a/src/java.base/share/classes/java/util/ArrayPrefixHelpers.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/ArrayPrefixHelpers.java Wed Oct 16 15:31:05 2019 +0200 @@ -103,10 +103,15 @@ static final int MIN_PARTITION = 16; static final class CumulateTask extends CountedCompleter { + @SuppressWarnings("serial") // Not statically typed as Serializable final T[] array; + @SuppressWarnings("serial") // Not statically typed as Serializable final BinaryOperator function; CumulateTask left, right; - T in, out; + @SuppressWarnings("serial") // Not statically typed as Serializable + T in; + @SuppressWarnings("serial") // Not statically typed as Serializable + T out; final int lo, hi, origin, fence, threshold; /** Root task constructor */ @@ -257,6 +262,7 @@ static final class LongCumulateTask extends CountedCompleter { final long[] array; + @SuppressWarnings("serial") // Not statically typed as Serializable final LongBinaryOperator function; LongCumulateTask left, right; long in, out; @@ -408,6 +414,7 @@ static final class DoubleCumulateTask extends CountedCompleter { final double[] array; + @SuppressWarnings("serial") // Not statically typed as Serializable final DoubleBinaryOperator function; DoubleCumulateTask left, right; double in, out; @@ -559,6 +566,7 @@ static final class IntCumulateTask extends CountedCompleter { final int[] array; + @SuppressWarnings("serial") // Not statically typed as Serializable final IntBinaryOperator function; IntCumulateTask left, right; int in, out; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/Arrays.java --- a/src/java.base/share/classes/java/util/Arrays.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/Arrays.java Wed Oct 16 15:31:05 2019 +0200 @@ -4339,6 +4339,7 @@ { @java.io.Serial private static final long serialVersionUID = -2764017481108945198L; + @SuppressWarnings("serial") // Conditionally serializable private final E[] a; ArrayList(E[] array) { diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java --- a/src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java Wed Oct 16 15:31:05 2019 +0200 @@ -115,8 +115,12 @@ static final class Sorter extends CountedCompleter { @java.io.Serial static final long serialVersionUID = 2446542900576103244L; - final T[] a, w; + @SuppressWarnings("serial") // Not statically typed as Serializable + final T[] a; + @SuppressWarnings("serial") // Not 
statically typed as Serializable + final T[] w; final int base, size, wbase, gran; + @SuppressWarnings("serial") // Not statically typed as Serializable Comparator comparator; Sorter(CountedCompleter par, T[] a, T[] w, int base, int size, int wbase, int gran, @@ -153,8 +157,13 @@ static final class Merger extends CountedCompleter { @java.io.Serial static final long serialVersionUID = 2446542900576103244L; - final T[] a, w; // main and workspace arrays + // main and workspace arrays + @SuppressWarnings("serial") // Not statically typed as Serializable + final T[] a; + @SuppressWarnings("serial") // Not statically typed as Serializable + final T[] w; final int lbase, lsize, rbase, rsize, wbase, gran; + @SuppressWarnings("serial") // Not statically typed as Serializable Comparator comparator; Merger(CountedCompleter par, T[] a, T[] w, int lbase, int lsize, int rbase, diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/Collection.java --- a/src/java.base/share/classes/java/util/Collection.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/Collection.java Wed Oct 16 15:31:05 2019 +0200 @@ -188,6 +188,38 @@ * or if the only reference to the backing collection is through an * unmodifiable view, the view can be considered effectively immutable. * + *
<h2>Serializability of Collections</h2>
+ * + * <p>
Serializability of collections is optional. As such, none of the collections + * interfaces are declared to implement the {@link java.io.Serializable} interface. + * However, serializability is regarded as being generally useful, so most collection + * implementations are serializable. + * + *
<p>
The collection implementations that are public classes (such as {@code ArrayList} + * or {@code HashMap}) are declared to implement the {@code Serializable} interface if they + * are in fact serializable. Some collection implementations are not public classes, + * such as the unmodifiable collections. In such cases, the + * serializability of such collections is described in the specification of the method + * that creates them, or in some other suitable place. In cases where the serializability + * of a collection is not specified, there is no guarantee about the serializability of such + * collections. In particular, many view collections are not serializable. + * + *
<p>
A collection implementation that implements the {@code Serializable} interface cannot + * be guaranteed to be serializable. The reason is that in general, collections + * contain elements of other types, and it is not possible to determine statically + * whether instances of some element type are actually serializable. For example, consider + * a serializable {@code Collection}, where {@code E} does not implement the + * {@code Serializable} interface. The collection may be serializable, if it contains only + * elements of some serializable subtype of {@code E}, or if it is empty. Collections are + * thus said to be conditionally serializable, as the serializability of the collection + * as a whole depends on whether the collection itself is serializable and on whether all + * contained elements are also serializable. + * + *
<p>
An additional case occurs with instances of {@link SortedSet} and {@link SortedMap}. + * These collections can be created with a {@link Comparator} that imposes an ordering on + * the set elements or map keys. Such a collection is serializable only if the provided + * {@code Comparator} is also serializable. + * *
<p>
This interface is a member of the * * Java Collections Framework. diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/Collections.java --- a/src/java.base/share/classes/java/util/Collections.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/Collections.java Wed Oct 16 15:31:05 2019 +0200 @@ -1024,6 +1024,7 @@ @java.io.Serial private static final long serialVersionUID = 1820017752578914078L; + @SuppressWarnings("serial") // Conditionally serializable final Collection c; UnmodifiableCollection(Collection c) { @@ -1164,6 +1165,7 @@ implements SortedSet, Serializable { @java.io.Serial private static final long serialVersionUID = -4929149591599911165L; + @SuppressWarnings("serial") // Conditionally serializable private final SortedSet ss; UnmodifiableSortedSet(SortedSet s) {super(s); ss = s;} @@ -1244,6 +1246,7 @@ /** * The instance we are protecting. */ + @SuppressWarnings("serial") // Conditionally serializable private final NavigableSet ns; UnmodifiableNavigableSet(NavigableSet s) {super(s); ns = s;} @@ -1304,6 +1307,7 @@ @java.io.Serial private static final long serialVersionUID = -283967356065247728L; + @SuppressWarnings("serial") // Conditionally serializable final List list; UnmodifiableList(List list) { @@ -1450,6 +1454,7 @@ @java.io.Serial private static final long serialVersionUID = -1034234728574286014L; + @SuppressWarnings("serial") // Conditionally serializable private final Map m; UnmodifiableMap(Map m) { @@ -1809,6 +1814,7 @@ @java.io.Serial private static final long serialVersionUID = -8806743815996713206L; + @SuppressWarnings("serial") // Conditionally serializable private final SortedMap sm; UnmodifiableSortedMap(SortedMap m) {super(m); sm = m; } @@ -1886,6 +1892,7 @@ /** * The instance we wrap and protect. 
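Both cases described in the Collection doc above, elements that are only conditionally serializable and sorted collections whose serializability depends on their comparator, can be observed with a short sketch (class name hypothetical):

    import java.io.ByteArrayOutputStream;
    import java.io.NotSerializableException;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;
    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.TreeMap;

    public class ConditionallySerializableDemo {
        public static void main(String[] args) throws Exception {
            List<Object> list = new ArrayList<>(); // ArrayList is Serializable
            list.add("a serializable element");
            serialize(list); // succeeds
            list.add(new Object()); // Object is not Serializable
            try {
                serialize(list);
            } catch (NotSerializableException expected) {
                System.out.println("element not serializable: " + expected);
            }

            // A SortedMap is serializable only if its comparator is.
            serialize(new TreeMap<String, Integer>(
                    (Comparator<String> & Serializable) String::compareTo));
            try {
                serialize(new TreeMap<String, Integer>((a, b) -> a.compareTo(b)));
            } catch (NotSerializableException expected) {
                System.out.println("comparator not serializable: " + expected);
            }
        }

        static void serialize(Object o) throws Exception {
            try (ObjectOutputStream out =
                    new ObjectOutputStream(new ByteArrayOutputStream())) {
                out.writeObject(o);
            }
        }
    }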
*/ + @SuppressWarnings("serial") // Conditionally serializable private final NavigableMap nm; UnmodifiableNavigableMap(NavigableMap m) @@ -2017,7 +2024,9 @@ @java.io.Serial private static final long serialVersionUID = 3053995032091335093L; + @SuppressWarnings("serial") // Conditionally serializable final Collection c; // Backing Collection + @SuppressWarnings("serial") // Conditionally serializable final Object mutex; // Object on which to synchronize SynchronizedCollection(Collection c) { @@ -2219,6 +2228,7 @@ @java.io.Serial private static final long serialVersionUID = 8695801310862127406L; + @SuppressWarnings("serial") // Conditionally serializable private final SortedSet ss; SynchronizedSortedSet(SortedSet s) { @@ -2314,6 +2324,7 @@ @java.io.Serial private static final long serialVersionUID = -5505529816273629798L; + @SuppressWarnings("serial") // Conditionally serializable private final NavigableSet ns; SynchronizedNavigableSet(NavigableSet s) { @@ -2424,6 +2435,7 @@ @java.io.Serial private static final long serialVersionUID = -7754090372962971524L; + @SuppressWarnings("serial") // Conditionally serializable final List list; SynchronizedList(List list) { @@ -2591,7 +2603,9 @@ @java.io.Serial private static final long serialVersionUID = 1978198479659022715L; + @SuppressWarnings("serial") // Conditionally serializable private final Map m; // Backing Map + @SuppressWarnings("serial") // Conditionally serializable final Object mutex; // Object on which to synchronize SynchronizedMap(Map m) { @@ -2788,6 +2802,7 @@ @java.io.Serial private static final long serialVersionUID = -8798146769416483793L; + @SuppressWarnings("serial") // Conditionally serializable private final SortedMap sm; SynchronizedSortedMap(SortedMap m) { @@ -2891,6 +2906,7 @@ @java.io.Serial private static final long serialVersionUID = 699392247599746807L; + @SuppressWarnings("serial") // Conditionally serializable private final NavigableMap nm; SynchronizedNavigableMap(NavigableMap m) { @@ -3070,7 +3086,9 @@ @java.io.Serial private static final long serialVersionUID = 1578914078182001775L; + @SuppressWarnings("serial") // Conditionally serializable final Collection c; + @SuppressWarnings("serial") // Conditionally serializable final Class type; @SuppressWarnings("unchecked") @@ -3126,6 +3144,7 @@ public boolean add(E e) { return c.add(typeCheck(e)); } + @SuppressWarnings("serial") // Conditionally serializable private E[] zeroLengthElementArray; // Lazily initialized private E[] zeroLengthElementArray() { @@ -3219,6 +3238,7 @@ { @java.io.Serial private static final long serialVersionUID = 1433151992604707767L; + @SuppressWarnings("serial") // Conditionally serializable final Queue queue; CheckedQueue(Queue queue, Class elementType) { @@ -3323,6 +3343,7 @@ @java.io.Serial private static final long serialVersionUID = 1599911165492914959L; + @SuppressWarnings("serial") // Conditionally serializable private final SortedSet ss; CheckedSortedSet(SortedSet s, Class type) { @@ -3387,6 +3408,7 @@ @java.io.Serial private static final long serialVersionUID = -5429120189805438922L; + @SuppressWarnings("serial") // Conditionally serializable private final NavigableSet ns; CheckedNavigableSet(NavigableSet s, Class type) { @@ -3470,6 +3492,7 @@ { @java.io.Serial private static final long serialVersionUID = 65247728283967356L; + @SuppressWarnings("serial") // Conditionally serializable final List list; CheckedList(List list, Class type) { @@ -3619,8 +3642,11 @@ @java.io.Serial private static final long serialVersionUID = 
5742860141034234728L; + @SuppressWarnings("serial") // Conditionally serializable private final Map m; + @SuppressWarnings("serial") // Conditionally serializable final Class keyType; + @SuppressWarnings("serial") // Conditionally serializable final Class valueType; private void typeCheck(Object key, Object value) { @@ -4019,6 +4045,7 @@ @java.io.Serial private static final long serialVersionUID = 1599671320688067438L; + @SuppressWarnings("serial") // Conditionally serializable private final SortedMap sm; CheckedSortedMap(SortedMap m, @@ -4094,6 +4121,7 @@ @java.io.Serial private static final long serialVersionUID = -4852462692372534096L; + @SuppressWarnings("serial") // Conditionally serializable private final NavigableMap nm; CheckedNavigableMap(NavigableMap m, @@ -4825,6 +4853,7 @@ @java.io.Serial private static final long serialVersionUID = 3193687207550431679L; + @SuppressWarnings("serial") // Conditionally serializable private final E element; SingletonSet(E e) {element = e;} @@ -4879,6 +4908,7 @@ @java.io.Serial private static final long serialVersionUID = 3093736618740652951L; + @SuppressWarnings("serial") // Conditionally serializable private final E element; SingletonList(E obj) {element = obj;} @@ -4948,7 +4978,9 @@ @java.io.Serial private static final long serialVersionUID = -6979724477215052911L; + @SuppressWarnings("serial") // Conditionally serializable private final K k; + @SuppressWarnings("serial") // Conditionally serializable private final V v; SingletonMap(K key, V value) { @@ -5087,6 +5119,7 @@ private static final long serialVersionUID = 2739099268398711800L; final int n; + @SuppressWarnings("serial") // Conditionally serializable final E element; CopiesList(int n, E e) { @@ -5320,6 +5353,7 @@ * * @serial */ + @SuppressWarnings("serial") // Conditionally serializable final Comparator cmp; ReverseComparator2(Comparator cmp) { @@ -5601,6 +5635,7 @@ private static class SetFromMap extends AbstractSet implements Set, Serializable { + @SuppressWarnings("serial") // Conditionally serializable private final Map m; // The backing map private transient Set s; // Its keySet @@ -5686,6 +5721,7 @@ implements Queue, Serializable { @java.io.Serial private static final long serialVersionUID = 1802017725587941708L; + @SuppressWarnings("serial") // Conditionally serializable private final Deque q; AsLIFOQueue(Deque q) { this.q = q; } public boolean add(E e) { q.addFirst(e); return true; } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/Comparators.java --- a/src/java.base/share/classes/java/util/Comparators.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/Comparators.java Wed Oct 16 15:31:05 2019 +0200 @@ -66,6 +66,7 @@ private static final long serialVersionUID = -7569533591570686392L; private final boolean nullFirst; // if null, non-null Ts are considered equal + @SuppressWarnings("serial") // Not statically typed as Serializable private final Comparator real; @SuppressWarnings("unchecked") diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/PriorityQueue.java --- a/src/java.base/share/classes/java/util/PriorityQueue.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/PriorityQueue.java Wed Oct 16 15:31:05 2019 +0200 @@ -111,6 +111,7 @@ * The comparator, or null if priority queue uses elements' * natural ordering. 
*/ + @SuppressWarnings("serial") // Conditionally serializable private final Comparator comparator; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/TreeMap.java --- a/src/java.base/share/classes/java/util/TreeMap.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/TreeMap.java Wed Oct 16 15:31:05 2019 +0200 @@ -118,6 +118,7 @@ * * @serial */ + @SuppressWarnings("serial") // Conditionally serializable private final Comparator comparator; private transient Entry root; @@ -1353,7 +1354,10 @@ * if loInclusive is true, lo is the inclusive bound, else lo * is the exclusive bound. Similarly for the upper bound. */ - final K lo, hi; + @SuppressWarnings("serial") // Conditionally serializable + final K lo; + @SuppressWarnings("serial") // Conditionally serializable + final K hi; final boolean fromStart, toEnd; final boolean loInclusive, hiInclusive; @@ -1936,6 +1940,7 @@ super(m, fromStart, lo, loInclusive, toEnd, hi, hiInclusive); } + @SuppressWarnings("serial") // Conditionally serializable private final Comparator reverseComparator = Collections.reverseOrder(m.comparator); @@ -2024,7 +2029,10 @@ @java.io.Serial private static final long serialVersionUID = -6520786458950516097L; private boolean fromStart = false, toEnd = false; - private K fromKey, toKey; + @SuppressWarnings("serial") // Conditionally serializable + private K fromKey; + @SuppressWarnings("serial") // Conditionally serializable + private K toKey; @java.io.Serial private Object readResolve() { return new AscendingSubMap<>(TreeMap.this, diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/Vector.java --- a/src/java.base/share/classes/java/util/Vector.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/Vector.java Wed Oct 16 15:31:05 2019 +0200 @@ -102,6 +102,7 @@ * * @serial */ + @SuppressWarnings("serial") // Conditionally serializable protected Object[] elementData; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/jar/JarVerifier.java --- a/src/java.base/share/classes/java/util/jar/JarVerifier.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/jar/JarVerifier.java Wed Oct 16 15:31:05 2019 +0200 @@ -590,6 +590,7 @@ URL vlocation; CodeSigner[] vsigners; java.security.cert.Certificate[] vcerts; + @SuppressWarnings("serial") // Not statically typed as Serializable Object csdomain; VerifierCodeSource(Object csdomain, URL location, CodeSigner[] signers) { diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/java/util/regex/Pattern.java --- a/src/java.base/share/classes/java/util/regex/Pattern.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/java/util/regex/Pattern.java Wed Oct 16 15:31:05 2019 +0200 @@ -1427,7 +1427,11 @@ localTCNCount = 0; if (!pattern.isEmpty()) { - compile(); + try { + compile(); + } catch (StackOverflowError soe) { + throw error("Stack overflow during pattern compilation"); + } } else { root = new Start(lastAccept); matchRoot = lastAccept; @@ -1965,6 +1969,10 @@ int ch = temp[cursor++]; while (ch != 0 && !isLineSeparator(ch)) ch = temp[cursor++]; + if (ch == 0 && cursor > patternLength) { + cursor = patternLength; + ch = temp[cursor++]; + } return ch; } @@ -1975,6 +1983,10 @@ int ch = temp[++cursor]; while (ch != 0 && !isLineSeparator(ch)) ch = temp[++cursor]; + if (ch == 0 && cursor > patternLength) { + cursor = patternLength; + ch = temp[cursor]; + } return ch; } @@ -3415,9 +3427,10 @@ private int N() { if (read() == 
'{') { int i = cursor; - while (cursor < patternLength && read() != '}') {} - if (cursor > patternLength) - throw error("Unclosed character name escape sequence"); + while (read() != '}') { + if (cursor >= patternLength) + throw error("Unclosed character name escape sequence"); + } String name = new String(temp, i, cursor - i - 1); try { return Character.codePointOf(name); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/javax/crypto/CryptoPermission.java --- a/src/java.base/share/classes/javax/crypto/CryptoPermission.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/javax/crypto/CryptoPermission.java Wed Oct 16 15:31:05 2019 +0200 @@ -55,6 +55,7 @@ private String alg; private int maxKeySize = Integer.MAX_VALUE; // no restriction on maxKeySize private String exemptionMechanism = null; + @SuppressWarnings("serial") // Not statically typed as Serializable private AlgorithmParameterSpec algParamSpec = null; private boolean checkParam = false; // no restriction on param diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/javax/security/auth/PrivateCredentialPermission.java --- a/src/java.base/share/classes/javax/security/auth/PrivateCredentialPermission.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/javax/security/auth/PrivateCredentialPermission.java Wed Oct 16 15:31:05 2019 +0200 @@ -119,6 +119,7 @@ * The set contains elements of type, * {@code PrivateCredentialPermission.CredOwner}. */ + @SuppressWarnings("serial") // Not statically typed as Serializable private Set principals; // ignored - kept around for compatibility private transient CredOwner[] credOwners; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/javax/security/auth/Subject.java --- a/src/java.base/share/classes/javax/security/auth/Subject.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/javax/security/auth/Subject.java Wed Oct 16 15:31:05 2019 +0200 @@ -111,6 +111,7 @@ * {@code java.security.Principal}. * The set is a {@code Subject.SecureSet}. 
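A minimal sketch of the Pattern fixes in the hunks above (class name hypothetical): an unclosed \N{...} character-name escape now fails with a clean PatternSyntaxException instead of scanning past the end of the pattern, and a pattern that overflows the stack during compilation is reported the same way:

    import java.util.regex.Pattern;
    import java.util.regex.PatternSyntaxException;

    public class PatternFixesDemo {
        public static void main(String[] args) {
            try {
                Pattern.compile("\\N{LATIN SMALL LETTER A"); // missing '}'
            } catch (PatternSyntaxException expected) {
                System.out.println(expected.getDescription());
            }
            // A well-formed name escape still compiles and matches.
            System.out.println(Pattern.compile("\\N{LATIN SMALL LETTER A}")
                    .matcher("a").matches()); // true
        }
    }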
*/ + @SuppressWarnings("serial") // Not statically typed as Serializable Set principals; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/javax/security/auth/callback/UnsupportedCallbackException.java --- a/src/java.base/share/classes/javax/security/auth/callback/UnsupportedCallbackException.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/javax/security/auth/callback/UnsupportedCallbackException.java Wed Oct 16 15:31:05 2019 +0200 @@ -39,6 +39,7 @@ /** * @serial */ + @SuppressWarnings("serial") // Not statically typed as Serializable private Callback callback; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java --- a/src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/net/www/protocol/http/HttpURLConnection.java Wed Oct 16 15:31:05 2019 +0200 @@ -2171,6 +2171,10 @@ } while (retryTunnel < maxRedirects); if (retryTunnel >= maxRedirects || (respCode != HTTP_OK)) { + if (respCode != HTTP_PROXY_AUTH) { + // remove all but authenticate responses + responses.reset(); + } throw new IOException("Unable to tunnel through proxy."+ " Proxy returns \"" + statusLine + "\""); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/net/www/protocol/http/NegotiateAuthentication.java --- a/src/java.base/share/classes/sun/net/www/protocol/http/NegotiateAuthentication.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/net/www/protocol/http/NegotiateAuthentication.java Wed Oct 16 15:31:05 2019 +0200 @@ -49,6 +49,7 @@ private static final long serialVersionUID = 100L; private static final PlatformLogger logger = HttpURLConnection.getHttpLogger(); + @SuppressWarnings("serial") // Not statically typed as Serializable private final HttpCallerInfo hci; // These maps are used to manage the GSS availability for different @@ -67,6 +68,7 @@ } // The HTTP Negotiate Helper + @SuppressWarnings("serial") // Not statically typed as Serializable private Negotiator negotiator = null; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/net/www/protocol/jar/Handler.java --- a/src/java.base/share/classes/sun/net/www/protocol/jar/Handler.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/net/www/protocol/jar/Handler.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -121,6 +121,13 @@ return h; } + public String checkNestedProtocol(String spec) { + if (spec.regionMatches(true, 0, "jar:", 0, 4)) { + return "Nested JAR URLs are not supported"; + } else { + return null; + } + } @Override @SuppressWarnings("deprecation") @@ -146,6 +153,12 @@ : false; spec = spec.substring(start, limit); + String exceptionMessage = checkNestedProtocol(spec); + if (exceptionMessage != null) { + // NPE will be transformed into MalformedURLException by the caller + throw new NullPointerException(exceptionMessage); + } + if (absoluteSpec) { file = parseAbsoluteSpec(spec); } else if (!refOnly) { diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java --- a/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -875,6 +875,11 @@ if (state == ST_CONNECTED) throw new AlreadyConnectedException(); + // ensure that the socket is bound + if (localAddress == null) { + bindInternal(null); + } + int n = Net.connect(family, fd, isa.getAddress(), @@ -932,8 +937,21 @@ remoteAddress = null; state = ST_UNCONNECTED; - // refresh local address - localAddress = Net.localAddress(fd); + // check whether rebind is needed + InetSocketAddress isa = Net.localAddress(fd); + if (isa.getPort() == 0) { + // On Linux, if bound to ephemeral port, + // disconnect does not preserve that port. + // In this case, try to rebind to the previous port. + int port = localAddress.getPort(); + localAddress = isa; // in case Net.bind fails + Net.bind(family, fd, isa.getAddress(), port); + isa = Net.localAddress(fd); // refresh address + assert isa.getPort() == port; + } + + // refresh localAddress + localAddress = isa; } } finally { writeLock.unlock(); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/nio/ch/SelectorImpl.java --- a/src/java.base/share/classes/sun/nio/ch/SelectorImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/nio/ch/SelectorImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -208,7 +208,8 @@ if (!(ch instanceof SelChImpl)) throw new IllegalSelectorException(); SelectionKeyImpl k = new SelectionKeyImpl((SelChImpl)ch, this); - k.attach(attachment); + if (attachment != null) + k.attach(attachment); // register (if needed) before adding to key set implRegister(k); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/nio/ch/ServerSocketAdaptor.java --- a/src/java.base/share/classes/sun/nio/ch/ServerSocketAdaptor.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/nio/ch/ServerSocketAdaptor.java Wed Oct 16 15:31:05 2019 +0200 @@ -37,6 +37,9 @@ import java.nio.channels.IllegalBlockingModeException; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; import java.util.Set; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -59,7 +62,12 @@ private volatile int timeout; static ServerSocket create(ServerSocketChannelImpl ssc) { - return new ServerSocketAdaptor(ssc); + PrivilegedExceptionAction pa = () -> new ServerSocketAdaptor(ssc); + try { + return AccessController.doPrivileged(pa); + } catch (PrivilegedActionException pae) { + throw new InternalError("Should not reach here", pae); + } } private 
ServerSocketAdaptor(ServerSocketChannelImpl ssc) { diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/nio/ch/SocketAdaptor.java --- a/src/java.base/share/classes/sun/nio/ch/SocketAdaptor.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/nio/ch/SocketAdaptor.java Wed Oct 16 15:31:05 2019 +0200 @@ -36,6 +36,9 @@ import java.net.SocketOption; import java.net.StandardSocketOptions; import java.nio.channels.SocketChannel; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; import java.util.Set; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -61,10 +64,11 @@ } static Socket create(SocketChannelImpl sc) { + PrivilegedExceptionAction pa = () -> new SocketAdaptor(sc); try { - return new SocketAdaptor(sc); - } catch (SocketException e) { - throw new InternalError("Should not reach here"); + return AccessController.doPrivileged(pa); + } catch (PrivilegedActionException pae) { + throw new InternalError("Should not reach here", pae); } } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/nio/cs/ArrayDecoder.java --- a/src/java.base/share/classes/sun/nio/cs/ArrayDecoder.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/nio/cs/ArrayDecoder.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,9 @@ /* * FastPath byte[]->char[] decoder, REPLACE on malformed or * unmappable input. + * + * FastPath encoded byte[]-> "String Latin1 coding" byte[] decoder for use when + * the charset is always decodable to the internal String Latin1 coding byte[], i.e. all mappings <= 0xff */ public interface ArrayDecoder { @@ -36,4 +39,14 @@ default boolean isASCIICompatible() { return false; } + + // Is always decodable to internal String Latin1 coding, i.e. all mappings <= 0xff + default boolean isLatin1Decodable() { + return false; + } + + // Decode to internal String Latin1 coding byte[] fastpath for when isLatin1Decodable == true + default int decodeToLatin1(byte[] src, int sp, int len, byte[] dst) { + return 0; + } } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/nio/cs/SingleByte.java --- a/src/java.base/share/classes/sun/nio/cs/SingleByte.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/nio/cs/SingleByte.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
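The checkNestedProtocol change in the jar Handler above makes nested jar: URLs fail fast at construction time. A minimal sketch (class name and paths hypothetical):

    import java.net.MalformedURLException;
    import java.net.URL;

    public class NestedJarUrlDemo {
        public static void main(String[] args) {
            try {
                // Rejected with a clear MalformedURLException rather than a
                // confusing failure later at openConnection() time.
                new URL("jar:jar:file:/tmp/outer.jar!/inner.jar!/entry.txt");
            } catch (MalformedURLException expected) {
                System.out.println(expected.getMessage());
            }
        }
    }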
* * This code is free software; you can redistribute it and/or modify it @@ -50,17 +50,27 @@ implements ArrayDecoder { private final char[] b2c; private final boolean isASCIICompatible; + private final boolean isLatin1Decodable; public Decoder(Charset cs, char[] b2c) { super(cs, 1.0f, 1.0f); this.b2c = b2c; this.isASCIICompatible = false; + this.isLatin1Decodable = false; } public Decoder(Charset cs, char[] b2c, boolean isASCIICompatible) { super(cs, 1.0f, 1.0f); this.b2c = b2c; this.isASCIICompatible = isASCIICompatible; + this.isLatin1Decodable = false; + } + + public Decoder(Charset cs, char[] b2c, boolean isASCIICompatible, boolean isLatin1Decodable) { + super(cs, 1.0f, 1.0f); + this.b2c = b2c; + this.isASCIICompatible = isASCIICompatible; + this.isLatin1Decodable = isLatin1Decodable; } private CoderResult decodeArrayLoop(ByteBuffer src, CharBuffer dst) { @@ -125,6 +135,18 @@ } @Override + public int decodeToLatin1(byte[] src, int sp, int len, byte[] dst) { + if (len > dst.length) + len = dst.length; + + int dp = 0; + while (dp < len) { + dst[dp++] = (byte)decode(src[sp++]); + } + return dp; + } + + @Override public int decode(byte[] src, int sp, int len, char[] dst) { if (len > dst.length) len = dst.length; @@ -143,6 +165,11 @@ public boolean isASCIICompatible() { return isASCIICompatible; } + + @Override + public boolean isLatin1Decodable() { + return isLatin1Decodable; + } } public static final class Encoder extends CharsetEncoder diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/reflect/annotation/AnnotationInvocationHandler.java --- a/src/java.base/share/classes/sun/reflect/annotation/AnnotationInvocationHandler.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/reflect/annotation/AnnotationInvocationHandler.java Wed Oct 16 15:31:05 2019 +0200 @@ -44,6 +44,7 @@ @java.io.Serial private static final long serialVersionUID = 6182022883658399397L; private final Class type; + @SuppressWarnings("serial") // Not statically typed as Serializable private final Map memberValues; AnnotationInvocationHandler(Class type, Map memberValues) { diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/reflect/annotation/AnnotationTypeMismatchExceptionProxy.java --- a/src/java.base/share/classes/sun/reflect/annotation/AnnotationTypeMismatchExceptionProxy.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/reflect/annotation/AnnotationTypeMismatchExceptionProxy.java Wed Oct 16 15:31:05 2019 +0200 @@ -36,7 +36,8 @@ class AnnotationTypeMismatchExceptionProxy extends ExceptionProxy { @java.io.Serial private static final long serialVersionUID = 7844069490309503934L; - private Method member; + @SuppressWarnings("serial") // Not statically typed as Serializable + private Method member; // Would be more robust to null-out in a writeObject method. 
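A rough illustration of what the Latin1 fastpath above enables, assuming a single-byte charset whose 256 byte values all map at or below U+00FF so its decoder can report isLatin1Decodable(); IBM1047 (EBCDIC) is used here and is assumed to be available via the jdk.charsets module:

    import java.nio.charset.Charset;

    public class Latin1FastpathDemo {
        public static void main(String[] args) {
            Charset ebcdic = Charset.forName("IBM1047");
            byte[] bytes = "hello".getBytes(ebcdic);
            // With compact strings, a Latin1-decodable charset can be decoded
            // straight into the String's internal LATIN1 byte array via
            // decodeToLatin1 above, skipping the char[] inflation step.
            String round = new String(bytes, ebcdic);
            System.out.println(round); // hello
        }
    }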
private final String foundType; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/internal/spec/TlsKeyMaterialSpec.java --- a/src/java.base/share/classes/sun/security/internal/spec/TlsKeyMaterialSpec.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/internal/spec/TlsKeyMaterialSpec.java Wed Oct 16 15:31:05 2019 +0200 @@ -50,7 +50,11 @@ private final SecretKey clientMacKey, serverMacKey; private final SecretKey clientCipherKey, serverCipherKey; - private final IvParameterSpec clientIv, serverIv; + + @SuppressWarnings("serial") // Not statically typed as Serializable + private final IvParameterSpec clientIv; + @SuppressWarnings("serial") // Not statically typed as Serializable + private final IvParameterSpec serverIv; /** * Constructs a new TlsKeyMaterialSpec from the client and server MAC diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/provider/PolicyParser.java --- a/src/java.base/share/classes/sun/security/provider/PolicyParser.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/provider/PolicyParser.java Wed Oct 16 15:31:05 2019 +0200 @@ -1315,7 +1315,9 @@ private static final long serialVersionUID = -4330692689482574072L; private String i18nMessage; + @SuppressWarnings("serial") // Not statically typed as Serializable private LocalizedMessage localizedMsg; + @SuppressWarnings("serial") // Not statically typed as Serializable private Object[] source; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/provider/SubjectCodeSource.java --- a/src/java.base/share/classes/sun/security/provider/SubjectCodeSource.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/provider/SubjectCodeSource.java Wed Oct 16 15:31:05 2019 +0200 @@ -54,6 +54,7 @@ private static final Class[] PARAMS = { String.class }; private static final sun.security.util.Debug debug = sun.security.util.Debug.getInstance("auth", "\t[Auth Access]"); + @SuppressWarnings("serial") // Not statically typed as Serializable private ClassLoader sysClassLoader; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/provider/certpath/X509CertPath.java --- a/src/java.base/share/classes/sun/security/provider/certpath/X509CertPath.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/provider/certpath/X509CertPath.java Wed Oct 16 15:31:05 2019 +0200 @@ -69,6 +69,7 @@ /** * List of certificates in this chain */ + @SuppressWarnings("serial") // Not statically typed as Serializable private List certs; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java --- a/src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -70,6 +70,7 @@ // Optional parameters associated with this RSA key // specified in the encoding of its AlgorithmId. // Must be null for "RSA" keys.
+ @SuppressWarnings("serial") // Not statically typed as Serializable private AlgorithmParameterSpec keyParams; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/rsa/RSAPrivateKeyImpl.java --- a/src/java.base/share/classes/sun/security/rsa/RSAPrivateKeyImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/rsa/RSAPrivateKeyImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -61,6 +61,7 @@ // optional parameters associated with this RSA key // specified in the encoding of its AlgorithmId. // must be null for "RSA" keys. + @SuppressWarnings("serial") // Not statically typed as Serializable private final AlgorithmParameterSpec keyParams; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java --- a/src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -62,6 +62,7 @@ // optional parameters associated with this RSA key // specified in the encoding of its AlgorithmId // must be null for "RSA" keys. + @SuppressWarnings("serial") // Not statically typed as Serializable private AlgorithmParameterSpec keyParams; /** diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/ssl/SupportedGroupsExtension.java --- a/src/java.base/share/classes/sun/security/ssl/SupportedGroupsExtension.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/ssl/SupportedGroupsExtension.java Wed Oct 16 15:31:05 2019 +0200 @@ -201,7 +201,7 @@ // Primary XDH (RFC 7748) curves NamedGroup.X25519, - // Primary NIST curves (e.g. used in TLSv1.3) + // Primary NIST Suite B curves NamedGroup.SECP256_R1, NamedGroup.SECP384_R1, NamedGroup.SECP521_R1, @@ -209,17 +209,6 @@ // Secondary XDH curves NamedGroup.X448, - // Secondary NIST curves - NamedGroup.SECT283_K1, - NamedGroup.SECT283_R1, - NamedGroup.SECT409_K1, - NamedGroup.SECT409_R1, - NamedGroup.SECT571_K1, - NamedGroup.SECT571_R1, - - // non-NIST curves - NamedGroup.SECP256_K1, - // FFDHE (RFC 7919) NamedGroup.FFDHE_2048, NamedGroup.FFDHE_3072, diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/util/FilePermCompat.java --- a/src/java.base/share/classes/sun/security/util/FilePermCompat.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/util/FilePermCompat.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
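A minimal sketch of the annotation pattern applied throughout these hunks (KeyHolder is hypothetical): the field's declared type is not Serializable, but the instances actually stored there are expected to be, so the serial lint warning is suppressed at the field:

    import java.io.Serializable;
    import java.security.spec.AlgorithmParameterSpec;

    class KeyHolder implements Serializable {
        private static final long serialVersionUID = 1L;

        @SuppressWarnings("serial") // Not statically typed as Serializable
        private AlgorithmParameterSpec keyParams; // serializable impls only
    }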
* * This code is free software; you can redistribute it and/or modify it @@ -42,8 +42,11 @@ public static final boolean compat; static { - String flag = GetPropertyAction.privilegedGetProperty( - "jdk.io.permissionsUseCanonicalPath", "false"); + String flag = SecurityProperties.privilegedGetOverridable( + "jdk.io.permissionsUseCanonicalPath"); + if (flag == null) { + flag = "false"; + } switch (flag) { case "true": nb = false; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/util/ObjectIdentifier.java --- a/src/java.base/share/classes/sun/security/util/ObjectIdentifier.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/util/ObjectIdentifier.java Wed Oct 16 15:31:05 2019 +0200 @@ -98,6 +98,7 @@ * Changed to Object * @serial */ + @SuppressWarnings("serial") // Not statically typed as Serializable private Object components = null; // path from root /** * @serial diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/util/SecurityConstants.java --- a/src/java.base/share/classes/sun/security/util/SecurityConstants.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/util/SecurityConstants.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,6 +97,10 @@ public static final NetPermission GET_RESPONSECACHE_PERMISSION = new NetPermission("getResponseCache"); + // java.net.ServerSocket, java.net.Socket + public static final NetPermission SET_SOCKETIMPL_PERMISSION = + new NetPermission("setSocketImpl"); + // java.lang.SecurityManager, sun.applet.AppletPanel public static final RuntimePermission CREATE_CLASSLOADER_PERMISSION = new RuntimePermission("createClassLoader"); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/validator/ValidatorException.java --- a/src/java.base/share/classes/sun/security/validator/ValidatorException.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/validator/ValidatorException.java Wed Oct 16 15:31:05 2019 +0200 @@ -62,6 +62,7 @@ public static final Object T_UNTRUSTED_CERT = "Untrusted certificate"; + @SuppressWarnings("serial") // Not statically typed as Serializable private Object type; private X509Certificate cert; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/x509/AlgorithmId.java --- a/src/java.base/share/classes/sun/security/x509/AlgorithmId.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/x509/AlgorithmId.java Wed Oct 16 15:31:05 2019 +0200 @@ -72,6 +72,7 @@ private ObjectIdentifier algid; // The (parsed) parameters + @SuppressWarnings("serial") // Not statically typed as Serializable private AlgorithmParameters algParams; private boolean constructedFromDer = true; @@ -80,6 +81,7 @@ * DER-encoded form; subclasses can be made to automaticaly parse * them so there is fast access to these parameters. 
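The FilePermCompat hunk above turns jdk.io.permissionsUseCanonicalPath from a plain system property into an overridable security property: the java.security entry supplies the value, a same-named system property supersedes it, and the flag defaults to "false" when neither is set. A rough sketch of that lookup order (an approximation; the real code goes through SecurityProperties.privilegedGetOverridable and only honors "true"/"false"):

    import java.security.Security;

    // Approximation of the lookup order: the system property supersedes the
    // java.security entry; absent both, the flag is treated as "false".
    public class CanonicalPathFlag {
        static boolean permissionsUseCanonicalPath() {
            String flag = System.getProperty("jdk.io.permissionsUseCanonicalPath");
            if (flag == null) {
                flag = Security.getProperty("jdk.io.permissionsUseCanonicalPath");
            }
            return Boolean.parseBoolean(flag); // null maps to false
        }

        public static void main(String[] args) {
            System.out.println(permissionsUseCanonicalPath());
        }
    }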
*/ + @SuppressWarnings("serial") // Not statically typed as Serializable protected DerValue params; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/x509/X509CertImpl.java --- a/src/java.base/share/classes/sun/security/x509/X509CertImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/x509/X509CertImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -70,6 +70,7 @@ * @author Hemma Prafullchandra * @see X509CertInfo */ +@SuppressWarnings("serial") // See writeReplace method in Certificate public class X509CertImpl extends X509Certificate implements DerEncoder { @java.io.Serial diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/classes/sun/security/x509/X509Key.java --- a/src/java.base/share/classes/sun/security/x509/X509Key.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/classes/sun/security/x509/X509Key.java Wed Oct 16 15:31:05 2019 +0200 @@ -84,7 +84,7 @@ private int unusedBits = 0; /* BitArray form of key */ - private BitArray bitStringKey = null; + private transient BitArray bitStringKey = null; /* The encoding for the key. */ protected byte[] encodedKey; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/conf/security/java.security --- a/src/java.base/share/conf/security/java.security Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/conf/security/java.security Wed Oct 16 15:31:05 2019 +0200 @@ -1213,3 +1213,51 @@ # if this property is not enabled. # jdk.security.caDistrustPolicies=SYMANTEC_TLS + +# +# FilePermission path canonicalization +# +# This security property dictates how the path argument is processed and stored +# while constructing a FilePermission object. If the value is set to true, the +# path argument is canonicalized and FilePermission methods (such as implies, +# equals, and hashCode) are implemented based on this canonicalized result. +# Otherwise, the path argument is not canonicalized and FilePermission methods are +# implemented based on the original input. See the implementation note of the +# FilePermission class for more details. +# +# If a system property of the same name is also specified, it supersedes the +# security property value defined here. +# +# The default value for this property is false. +# +jdk.io.permissionsUseCanonicalPath=false + + +# +# Policies for the proxy_impersonator Kerberos ccache configuration entry +# +# The proxy_impersonator ccache configuration entry indicates that the ccache +# is a synthetic delegated credential for use with S4U2Proxy by an intermediate +# server. The ccache file should also contain the TGT of this server and +# an evidence ticket from the default principal of the ccache to this server. +# +# This security property determines how Java uses this configuration entry. +# There are 3 possible values: +# +# no-impersonate - Ignore this configuration entry, and always act as +# the owner of the TGT (if it exists). +# +# try-impersonate - Try impersonation when this configuration entry exists. +# If no matching TGT or evidence ticket is found, +# fallback to no-impersonate. +# +# always-impersonate - Always impersonate when this configuration entry exists. +# If no matching TGT or evidence ticket is found, +# no initial credential is read from the ccache. +# +# The default value is "always-impersonate". +# +# If a system property of the same name is also specified, it supersedes the +# security property value defined here. 
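The new jdk.security.krb5.default.initiate.credential entry described above accepts three policies for a ccache that carries a proxy_impersonator configuration entry. A minimal sketch of selecting one programmatically; as with the file entry, a same-named system property would supersede it, and the setting must be in place before the Kerberos code first reads it:

    import java.security.Security;

    // Sketch: choosing the S4U2Proxy ccache policy at runtime. The value must
    // be one of no-impersonate, try-impersonate, always-impersonate.
    public class Krb5CcachePolicy {
        public static void main(String[] args) {
            Security.setProperty(
                    "jdk.security.krb5.default.initiate.credential",
                    "try-impersonate");
            // ... subsequent Kerberos logins consult the policy whenever the
            // ccache contains a proxy_impersonator configuration entry.
        }
    }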
+# +#jdk.security.krb5.default.initiate.credential=always-impersonate diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/Class.c --- a/src/java.base/share/native/libjava/Class.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libjava/Class.c Wed Oct 16 15:31:05 2019 +0200 @@ -35,12 +35,9 @@ #include "jni.h" #include "jni_util.h" #include "jvm.h" +#include "check_classname.h" #include "java_lang_Class.h" -/* defined in libverify.so/verify.dll (src file common/check_format.c) */ -extern jboolean VerifyClassname(char *utf_name, jboolean arrayAllowed); -extern jboolean VerifyFixClassname(char *utf_name); - #define OBJ "Ljava/lang/Object;" #define CLS "Ljava/lang/Class;" #define CPL "Ljdk/internal/reflect/ConstantPool;" @@ -123,14 +120,14 @@ } (*env)->GetStringUTFRegion(env, classname, 0, unicode_len, clname); - if (VerifyFixClassname(clname) == JNI_TRUE) { + if (verifyFixClassname(clname) == JNI_TRUE) { /* slashes present in clname, use name b4 translation for exception */ (*env)->GetStringUTFRegion(env, classname, 0, unicode_len, clname); JNU_ThrowClassNotFoundException(env, clname); goto done; } - if (!VerifyClassname(clname, JNI_TRUE)) { /* expects slashed name */ + if (!verifyClassname(clname, JNI_TRUE)) { /* expects slashed name */ JNU_ThrowClassNotFoundException(env, clname); goto done; } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/ClassLoader.c --- a/src/java.base/share/native/libjava/ClassLoader.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libjava/ClassLoader.c Wed Oct 16 15:31:05 2019 +0200 @@ -30,14 +30,11 @@ #include "jni_util.h" #include "jlong.h" #include "jvm.h" +#include "check_classname.h" #include "java_lang_ClassLoader.h" #include "java_lang_ClassLoader_NativeLibrary.h" #include -/* defined in libverify.so/verify.dll (src file common/check_format.c) */ -extern jboolean VerifyClassname(char *utf_name, jboolean arrayAllowed); -extern jboolean VerifyFixClassname(char *utf_name); - static JNINativeMethod methods[] = { {"retrieveDirectives", "()Ljava/lang/AssertionStatusDirectives;", (void *)&JVM_AssertionStatusDirectives} }; @@ -120,7 +117,7 @@ if (utfName == NULL) { goto free_body; } - VerifyFixClassname(utfName); + fixClassname(utfName); } else { utfName = NULL; } @@ -185,7 +182,7 @@ JNU_ThrowOutOfMemoryError(env, NULL); return result; } - VerifyFixClassname(utfName); + fixClassname(utfName); } else { utfName = NULL; } @@ -231,9 +228,9 @@ JNU_ThrowOutOfMemoryError(env, NULL); return NULL; } - VerifyFixClassname(clname); + fixClassname(clname); - if (!VerifyClassname(clname, JNI_TRUE)) { /* expects slashed name */ + if (!verifyClassname(clname, JNI_TRUE)) { /* expects slashed name */ goto done; } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/VM.c --- a/src/java.base/share/native/libjava/VM.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libjava/VM.c Wed Oct 16 15:31:05 2019 +0200 @@ -42,11 +42,6 @@ JNIEXPORT void JNICALL Java_jdk_internal_misc_VM_initialize(JNIEnv *env, jclass cls) { - if (!JDK_InitJvmHandle()) { - JNU_ThrowInternalError(env, "Handle for JVM not found for symbol lookup"); - return; - } - // Registers implementations of native methods described in methods[] // above. 
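The Class.c hunk above keeps the existing contract of forName0 while switching to the relocated helpers: verifyFixClassname reports that slashes were already present (so the dotted, untranslated name is used in the exception), and verifyClassname then validates the slashed form. The behavior is observable from plain Java, where a slashed name is rejected with ClassNotFoundException rather than resolved:

    // The native check is observable from plain Java: binary names use dots,
    // and a name that already contains '/' is rejected before lookup.
    public class ForNameCheck {
        public static void main(String[] args) throws Exception {
            Class.forName("java.util.List");      // ok: dotted binary name
            try {
                Class.forName("java/util/List");  // slashed: rejected
            } catch (ClassNotFoundException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }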
// In particular, registers JVM_GetNanoTimeAdjustment as the implementation diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/check_classname.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/native/libjava/check_classname.c Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,292 @@ +/* + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include +#include +#include +#include +#include + +#include "jni.h" +#include "jvm.h" +#include "check_classname.h" + +typedef unsigned short unicode; + +static char * +skip_over_fieldname(char *name, jboolean slash_okay, + unsigned int len); +static char * +skip_over_field_signature(char *name, jboolean void_okay, + unsigned int len); + +/* + * Return non-zero if the character is a valid in JVM class name, zero + * otherwise. The only characters currently disallowed from JVM class + * names are given in the table below: + * + * Character Hex Decimal + * '.' 0x2e 46 + * '/' 0x2f 47 + * ';' 0x3b 59 + * '[' 0x5b 91 + * + * (Method names have further restrictions dealing with the '<' and + * '>' characters.) + */ +static int isJvmIdentifier(unicode ch) { + if( ch > 91 || ch < 46 ) + return 1; /* Lowercase ASCII letters are > 91 */ + else { /* 46 <= ch <= 91 */ + if (ch <= 90 && ch >= 60) { + return 1; /* Uppercase ASCII recognized here */ + } else { /* ch == 91 || 46 <= ch <= 59 */ + if (ch == 91 || ch == 59 || ch <= 47) + return 0; + else + return 1; + } + } +} + +static unicode +next_utf2unicode(char **utfstring_ptr, int * valid) +{ + unsigned char *ptr = (unsigned char *)(*utfstring_ptr); + unsigned char ch, ch2, ch3; + int length = 1; /* default length */ + unicode result = 0x80; /* default bad result; */ + *valid = 1; + switch ((ch = ptr[0]) >> 4) { + default: + result = ch; + break; + + case 0x8: case 0x9: case 0xA: case 0xB: case 0xF: + /* Shouldn't happen. 
*/ + *valid = 0; + break; + + case 0xC: case 0xD: + /* 110xxxxx 10xxxxxx */ + if (((ch2 = ptr[1]) & 0xC0) == 0x80) { + unsigned char high_five = ch & 0x1F; + unsigned char low_six = ch2 & 0x3F; + result = (high_five << 6) + low_six; + length = 2; + } + break; + + case 0xE: + /* 1110xxxx 10xxxxxx 10xxxxxx */ + if (((ch2 = ptr[1]) & 0xC0) == 0x80) { + if (((ch3 = ptr[2]) & 0xC0) == 0x80) { + unsigned char high_four = ch & 0x0f; + unsigned char mid_six = ch2 & 0x3f; + unsigned char low_six = ch3 & 0x3f; + result = (((high_four << 6) + mid_six) << 6) + low_six; + length = 3; + } else { + length = 2; + } + } + break; + } /* end of switch */ + + *utfstring_ptr = (char *)(ptr + length); + return result; +} + +/* Take pointer to a string. Skip over the longest part of the string that + * could be taken as a fieldname. Allow '/' if slash_okay is JNI_TRUE. + * + * Return a pointer to just past the fieldname. Return NULL if no fieldname + * at all was found, or in the case of slash_okay being true, we saw + * consecutive slashes (meaning we were looking for a qualified path but + * found something that was badly-formed). + */ +static char * +skip_over_fieldname(char *name, jboolean slash_okay, + unsigned int length) +{ + char *p; + unicode ch; + unicode last_ch = 0; + int valid = 1; + /* last_ch == 0 implies we are looking at the first char. */ + for (p = name; p != name + length; last_ch = ch) { + char *old_p = p; + ch = *p; + if (ch < 128) { + p++; + if (isJvmIdentifier(ch)) { + continue; + } + } else { + char *tmp_p = p; + ch = next_utf2unicode(&tmp_p, &valid); + if (valid == 0) + return 0; + p = tmp_p; + if (isJvmIdentifier(ch)) { + continue; + } + } + + if (slash_okay && ch == '/' && last_ch) { + if (last_ch == '/') { + return 0; /* Don't permit consecutive slashes */ + } + } else if (ch == '_' || ch == '$') { + } else { + return last_ch ? old_p : 0; + } + } + return last_ch ? p : 0; +} + +/* Take pointer to a string. Skip over the longest part of the string that + * could be taken as a field signature. Allow "void" if void_okay. + * + * Return a pointer to just past the signature. Return NULL if no legal + * signature is found. + */ + +static char * +skip_over_field_signature(char *name, jboolean void_okay, + unsigned int length) +{ + unsigned int array_dim = 0; + for (;length > 0;) { + switch (name[0]) { + case JVM_SIGNATURE_VOID: + if (!void_okay) return 0; + /* FALL THROUGH */ + case JVM_SIGNATURE_BOOLEAN: + case JVM_SIGNATURE_BYTE: + case JVM_SIGNATURE_CHAR: + case JVM_SIGNATURE_SHORT: + case JVM_SIGNATURE_INT: + case JVM_SIGNATURE_FLOAT: + case JVM_SIGNATURE_LONG: + case JVM_SIGNATURE_DOUBLE: + return name + 1; + + case JVM_SIGNATURE_CLASS: { + /* Skip over the classname, if one is there. */ + char *p = + skip_over_fieldname(name + 1, JNI_TRUE, --length); + /* The next character better be a semicolon. */ + if (p && p - name - 1 > 0 && p[0] == ';') + return p + 1; + return 0; + } + + case JVM_SIGNATURE_ARRAY: + array_dim++; + /* JVMS 2nd ed. 4.10 */ + /* The number of dimensions in an array is limited to 255 ... */ + if (array_dim > 255) { + return 0; + } + /* The rest of what's there better be a legal signature. */ + name++; + length--; + void_okay = JNI_FALSE; + break; + + default: + return 0; + } + } + return 0; +} + +/* Determine if the specified name is legal + * UTF name for a classname. + * + * Note that this routine expects the internal form of qualified classes: + * the dots should have been replaced by slashes. 
+ */ +jboolean verifyClassname(char *name, jboolean allowArrayClass) +{ + size_t s = strlen(name); + assert(s <= UINT_MAX); + unsigned int length = (unsigned int)s; + char *p; + + if (length > 0 && name[0] == JVM_SIGNATURE_ARRAY) { + if (!allowArrayClass) { + return JNI_FALSE; + } else { + /* Everything that's left better be a field signature */ + p = skip_over_field_signature(name, JNI_FALSE, length); + } + } else { + /* skip over the fieldname. Slashes are okay */ + p = skip_over_fieldname(name, JNI_TRUE, length); + } + return (p != 0 && p - name == (ptrdiff_t)length); +} + +/* + * Translates '.' to '/'. Returns JNI_TRUE if any / were present. + */ +jboolean verifyFixClassname(char *name) +{ + char *p = name; + jboolean slashesFound = JNI_FALSE; + int valid = 1; + + while (valid != 0 && *p != '\0') { + if (*p == '/') { + slashesFound = JNI_TRUE; + p++; + } else if (*p == '.') { + *p++ = '/'; + } else { + next_utf2unicode(&p, &valid); + } + } + + return slashesFound && valid != 0; +} + +/* + * Translates '.' to '/'. + */ +void fixClassname(char *name) +{ + char *p = name; + int valid = 1; + + while (valid != 0 && *p != '\0') { + if (*p == '.') { + *p++ = '/'; + } else { + next_utf2unicode(&p, &valid); + } + } +} diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/check_classname.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/native/libjava/check_classname.h Wed Oct 16 15:31:05 2019 +0200 @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "jni.h" + +/* + * Class name checking methods + */ + +jboolean verifyClassname(char *name, jboolean allowArrayClass); +jboolean verifyFixClassname(char *name); +void fixClassname(char *name); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/jdk_util.h --- a/src/java.base/share/native/libjava/jdk_util.h Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libjava/jdk_util.h Wed Oct 16 15:31:05 2019 +0200 @@ -45,20 +45,6 @@ JNIEXPORT void JDK_GetVersionInfo0(jdk_version_info* info, size_t info_size); - -/*------------------------------------------------------- - * Internal interface for JDK to use - *------------------------------------------------------- - */ - -/* Init JVM handle for symbol lookup; - * Return 0 if JVM handle not found. 
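The new check_classname.c above re-homes the classname checks in libjava: fixClassname and verifyFixClassname translate '.' to '/', and verifyClassname accepts either a field-style name (slashes allowed) or, when permitted, an array signature. A simplified Java rendering of just the name rules, deliberately ignoring array descriptors and the UTF-8 decoding the C code performs:

    // Simplified rendering of the name rules only: '.' translates to '/',
    // and '.', ';', '[' or an empty segment (consecutive slashes) invalidate
    // an internal name.
    public class ClassNameCheck {
        static String fix(String name) {
            return name.replace('.', '/');
        }

        static boolean isValidInternalName(String name) {
            if (name.isEmpty()) {
                return false;
            }
            for (String part : name.split("/", -1)) { // -1 keeps empty parts
                if (part.isEmpty()) {
                    return false; // leading, trailing or doubled slash
                }
                for (int i = 0; i < part.length(); i++) {
                    char c = part.charAt(i);
                    if (c == '.' || c == ';' || c == '[') {
                        return false;
                    }
                }
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(isValidInternalName(fix("java.util.List"))); // true
            System.out.println(isValidInternalName(fix("bad..name")));      // false
        }
    }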
- */ -int JDK_InitJvmHandle(); - -/* Find the named JVM entry; returns NULL if not found. */ -void* JDK_FindJvmEntry(const char* name); - #ifdef __cplusplus } /* extern "C" */ #endif /* __cplusplus */ diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/jni_util.c --- a/src/java.base/share/native/libjava/jni_util.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libjava/jni_util.c Wed Oct 16 15:31:05 2019 +0200 @@ -77,77 +77,23 @@ } JNIEXPORT void JNICALL -JNU_ThrowIllegalAccessError(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/IllegalAccessError", msg); -} - -JNIEXPORT void JNICALL -JNU_ThrowIllegalAccessException(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/IllegalAccessException", msg); -} - -JNIEXPORT void JNICALL JNU_ThrowInternalError(JNIEnv *env, const char *msg) { JNU_ThrowByName(env, "java/lang/InternalError", msg); } JNIEXPORT void JNICALL -JNU_ThrowNoSuchFieldException(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/NoSuchFieldException", msg); -} - -JNIEXPORT void JNICALL -JNU_ThrowNoSuchMethodException(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/NoSuchMethodException", msg); -} - -JNIEXPORT void JNICALL JNU_ThrowClassNotFoundException(JNIEnv *env, const char *msg) { JNU_ThrowByName(env, "java/lang/ClassNotFoundException", msg); } JNIEXPORT void JNICALL -JNU_ThrowNumberFormatException(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/NumberFormatException", msg); -} - -JNIEXPORT void JNICALL JNU_ThrowIOException(JNIEnv *env, const char *msg) { JNU_ThrowByName(env, "java/io/IOException", msg); } -JNIEXPORT void JNICALL -JNU_ThrowNoSuchFieldError(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/NoSuchFieldError", msg); -} - -JNIEXPORT void JNICALL -JNU_ThrowNoSuchMethodError(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/NoSuchMethodError", msg); -} - -JNIEXPORT void JNICALL -JNU_ThrowStringIndexOutOfBoundsException(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/StringIndexOutOfBoundsException", msg); -} - -JNIEXPORT void JNICALL -JNU_ThrowInstantiationException(JNIEnv *env, const char *msg) -{ - JNU_ThrowByName(env, "java/lang/InstantiationException", msg); -} - /* * Throw an exception by name, using the string returned by * getLastErrorString for the detail string. 
If the last-error @@ -845,12 +791,6 @@ CHECK_NULL(String_value_ID); } -JNIEXPORT jstring -NewStringPlatform(JNIEnv *env, const char *str) -{ - return JNU_NewStringPlatform(env, str); -} - JNIEXPORT jstring JNICALL JNU_NewStringPlatform(JNIEnv *env, const char *str) { @@ -1024,54 +964,6 @@ return cls; } -JNIEXPORT jclass JNICALL -JNU_ClassClass(JNIEnv *env) -{ - static jclass cls = 0; - if (cls == 0) { - jclass c; - if ((*env)->EnsureLocalCapacity(env, 1) < 0) - return 0; - c = (*env)->FindClass(env, "java/lang/Class"); - CHECK_NULL_RETURN(c, NULL); - cls = (*env)->NewGlobalRef(env, c); - (*env)->DeleteLocalRef(env, c); - } - return cls; -} - -JNIEXPORT jclass JNICALL -JNU_ClassObject(JNIEnv *env) -{ - static jclass cls = 0; - if (cls == 0) { - jclass c; - if ((*env)->EnsureLocalCapacity(env, 1) < 0) - return 0; - c = (*env)->FindClass(env, "java/lang/Object"); - CHECK_NULL_RETURN(c, NULL); - cls = (*env)->NewGlobalRef(env, c); - (*env)->DeleteLocalRef(env, c); - } - return cls; -} - -JNIEXPORT jclass JNICALL -JNU_ClassThrowable(JNIEnv *env) -{ - static jclass cls = 0; - if (cls == 0) { - jclass c; - if ((*env)->EnsureLocalCapacity(env, 1) < 0) - return 0; - c = (*env)->FindClass(env, "java/lang/Throwable"); - CHECK_NULL_RETURN(c, NULL); - cls = (*env)->NewGlobalRef(env, c); - (*env)->DeleteLocalRef(env, c); - } - return cls; -} - JNIEXPORT jint JNICALL JNU_CopyObjectArray(JNIEnv *env, jobjectArray dst, jobjectArray src, jint count) @@ -1110,125 +1002,10 @@ return JNI_ERR; } -JNIEXPORT jboolean JNICALL -JNU_Equals(JNIEnv *env, jobject object1, jobject object2) -{ - static jmethodID mid = NULL; - if (mid == NULL) { - jclass objClazz = JNU_ClassObject(env); - CHECK_NULL_RETURN(objClazz, JNI_FALSE); - mid = (*env)->GetMethodID(env, objClazz, "equals", - "(Ljava/lang/Object;)Z"); - CHECK_NULL_RETURN(mid, JNI_FALSE); - } - return (*env)->CallBooleanMethod(env, object1, mid, object2); -} - - -/************************************************************************ - * Thread calls - */ - -static jmethodID Object_waitMID; -static jmethodID Object_notifyMID; -static jmethodID Object_notifyAllMID; - -JNIEXPORT void JNICALL -JNU_MonitorWait(JNIEnv *env, jobject object, jlong timeout) -{ - if (object == NULL) { - JNU_ThrowNullPointerException(env, "JNU_MonitorWait argument"); - return; - } - if (Object_waitMID == NULL) { - jclass cls = JNU_ClassObject(env); - if (cls == NULL) { - return; - } - Object_waitMID = (*env)->GetMethodID(env, cls, "wait", "(J)V"); - if (Object_waitMID == NULL) { - return; - } - } - (*env)->CallVoidMethod(env, object, Object_waitMID, timeout); -} - -JNIEXPORT void JNICALL -JNU_Notify(JNIEnv *env, jobject object) -{ - if (object == NULL) { - JNU_ThrowNullPointerException(env, "JNU_Notify argument"); - return; - } - if (Object_notifyMID == NULL) { - jclass cls = JNU_ClassObject(env); - if (cls == NULL) { - return; - } - Object_notifyMID = (*env)->GetMethodID(env, cls, "notify", "()V"); - if (Object_notifyMID == NULL) { - return; - } - } - (*env)->CallVoidMethod(env, object, Object_notifyMID); -} - -JNIEXPORT void JNICALL -JNU_NotifyAll(JNIEnv *env, jobject object) -{ - if (object == NULL) { - JNU_ThrowNullPointerException(env, "JNU_NotifyAll argument"); - return; - } - if (Object_notifyAllMID == NULL) { - jclass cls = JNU_ClassObject(env); - if (cls == NULL) { - return; - } - Object_notifyAllMID = (*env)->GetMethodID(env, cls,"notifyAll", "()V"); - if (Object_notifyAllMID == NULL) { - return; - } - } - (*env)->CallVoidMethod(env, object, Object_notifyAllMID); -} - - 
/************************************************************************ * Debugging utilities */ -JNIEXPORT void JNICALL -JNU_PrintString(JNIEnv *env, char *hdr, jstring string) -{ - if (string == NULL) { - fprintf(stderr, "%s: is NULL\n", hdr); - } else { - const char *stringPtr = JNU_GetStringPlatformChars(env, string, 0); - if (stringPtr == 0) - return; - fprintf(stderr, "%s: %s\n", hdr, stringPtr); - JNU_ReleaseStringPlatformChars(env, string, stringPtr); - } -} - -JNIEXPORT void JNICALL -JNU_PrintClass(JNIEnv *env, char* hdr, jobject object) -{ - if (object == NULL) { - fprintf(stderr, "%s: object is NULL\n", hdr); - return; - } else { - jclass cls = (*env)->GetObjectClass(env, object); - jstring clsName = JNU_ToString(env, cls); - if (clsName == NULL) { - JNU_PrintString(env, hdr, clsName); - } - (*env)->DeleteLocalRef(env, cls); - (*env)->DeleteLocalRef(env, clsName); - } -} - JNIEXPORT jstring JNICALL JNU_ToString(JNIEnv *env, jobject object) { @@ -1437,70 +1214,3 @@ } return result; } - -JNIEXPORT void JNICALL -JNU_SetStaticFieldByName(JNIEnv *env, - jboolean *hasException, - const char *classname, - const char *name, - const char *signature, - ...) -{ - jclass cls; - jfieldID fid; - va_list args; - - if ((*env)->EnsureLocalCapacity(env, 3) < 0) - goto done2; - - cls = (*env)->FindClass(env, classname); - if (cls == 0) - goto done2; - - fid = (*env)->GetStaticFieldID(env, cls, name, signature); - if (fid == 0) - goto done1; - - va_start(args, signature); - switch (*signature) { - case '[': - case 'L': - (*env)->SetStaticObjectField(env, cls, fid, va_arg(args, jobject)); - break; - case 'Z': - (*env)->SetStaticBooleanField(env, cls, fid, (jboolean)va_arg(args, int)); - break; - case 'B': - (*env)->SetStaticByteField(env, cls, fid, (jbyte)va_arg(args, int)); - break; - case 'C': - (*env)->SetStaticCharField(env, cls, fid, (jchar)va_arg(args, int)); - break; - case 'S': - (*env)->SetStaticShortField(env, cls, fid, (jshort)va_arg(args, int)); - break; - case 'I': - (*env)->SetStaticIntField(env, cls, fid, va_arg(args, jint)); - break; - case 'J': - (*env)->SetStaticLongField(env, cls, fid, va_arg(args, jlong)); - break; - case 'F': - (*env)->SetStaticFloatField(env, cls, fid, (jfloat)va_arg(args, jdouble)); - break; - case 'D': - (*env)->SetStaticDoubleField(env, cls, fid, va_arg(args, jdouble)); - break; - - default: - (*env)->FatalError(env, "JNU_SetStaticFieldByName: illegal signature"); - } - va_end(args); - - done1: - (*env)->DeleteLocalRef(env, cls); - done2: - if (hasException) { - *hasException = (*env)->ExceptionCheck(env); - } -} diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/jni_util.h --- a/src/java.base/share/native/libjava/jni_util.h Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libjava/jni_util.h Wed Oct 16 15:31:05 2019 +0200 @@ -62,41 +62,14 @@ JNU_ThrowIllegalArgumentException(JNIEnv *env, const char *msg); JNIEXPORT void JNICALL -JNU_ThrowIllegalAccessError(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL -JNU_ThrowIllegalAccessException(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL JNU_ThrowInternalError(JNIEnv *env, const char *msg); JNIEXPORT void JNICALL JNU_ThrowIOException(JNIEnv *env, const char *msg); JNIEXPORT void JNICALL -JNU_ThrowNoSuchFieldException(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL -JNU_ThrowNoSuchMethodException(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL JNU_ThrowClassNotFoundException(JNIEnv *env, const char *msg); -JNIEXPORT void JNICALL 
-JNU_ThrowNumberFormatException(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL -JNU_ThrowNoSuchFieldError(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL -JNU_ThrowNoSuchMethodError(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL -JNU_ThrowStringIndexOutOfBoundsException(JNIEnv *env, const char *msg); - -JNIEXPORT void JNICALL -JNU_ThrowInstantiationException(JNIEnv *env, const char *msg); - /* Throw an exception by name, using the string returned by * getLastErrorString for the detail string. If the last-error * string is NULL, use the given default detail string. @@ -120,9 +93,6 @@ JNU_ThrowIOExceptionWithLastError(JNIEnv *env, const char *defaultDetail); /* Convert between Java strings and i18n C strings */ -JNIEXPORT jstring -NewStringPlatform(JNIEnv *env, const char *str); - JNIEXPORT const char * GetStringPlatformChars(JNIEnv *env, jstring jstr, jboolean *isCopy); @@ -139,15 +109,6 @@ JNIEXPORT jclass JNICALL JNU_ClassString(JNIEnv *env); -JNIEXPORT jclass JNICALL -JNU_ClassClass(JNIEnv *env); - -JNIEXPORT jclass JNICALL -JNU_ClassObject(JNIEnv *env); - -JNIEXPORT jclass JNICALL -JNU_ClassThrowable(JNIEnv *env); - /* Copy count number of arguments from src to dst. Array bounds * and ArrayStoreException are checked. */ @@ -246,36 +207,6 @@ const char *classname, const char *name, const char *sig); -JNIEXPORT void JNICALL -JNU_SetStaticFieldByName(JNIEnv *env, - jboolean *hasException, - const char *classname, - const char *name, - const char *sig, - ...); - - -/* - * Calls the .equals method. - */ -JNIEXPORT jboolean JNICALL -JNU_Equals(JNIEnv *env, jobject object1, jobject object2); - - -/************************************************************************ - * Thread calls - * - * Convenience thread-related calls on the java.lang.Object class. - */ - -JNIEXPORT void JNICALL -JNU_MonitorWait(JNIEnv *env, jobject object, jlong timeout); - -JNIEXPORT void JNICALL -JNU_Notify(JNIEnv *env, jobject object); - -JNIEXPORT void JNICALL -JNU_NotifyAll(JNIEnv *env, jobject object); /************************************************************************ @@ -349,19 +280,15 @@ } \ } while (0) #endif /* __cplusplus */ + /************************************************************************ * Debugging utilities */ -JNIEXPORT void JNICALL -JNU_PrintString(JNIEnv *env, char *hdr, jstring string); - -JNIEXPORT void JNICALL -JNU_PrintClass(JNIEnv *env, char *hdr, jobject object); - JNIEXPORT jstring JNICALL JNU_ToString(JNIEnv *env, jobject object); + /* * Package shorthand for use by native libraries */ @@ -402,8 +329,6 @@ FAST_UTF_8 }; -int getFastEncoding(); - JNIEXPORT void InitializeEncoding(JNIEnv *env, const char *name); void* getProcessHandle(); diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libjava/verify_stub.c --- a/src/java.base/share/native/libjava/verify_stub.c Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,59 +0,0 @@ -/* - * Copyright (c) 1999, 2003, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - - -/* - * The real verifier now lives in libverifier.so/verifier.dll. - * - * This dummy exists so that HotSpot will run with the new - * libjava.so/java.dll which is where is it accustomed to finding the - * verifier. - */ - -#include "jni.h" - -struct struct_class_size_info; -typedef struct struct_class_size_info class_size_info; - - -JNIIMPORT jboolean -VerifyClass(JNIEnv *env, jclass cb, char *buffer, jint len); - -JNIIMPORT jboolean -VerifyClassForMajorVersion(JNIEnv *env, jclass cb, char *buffer, jint len, - jint major_version); - -JNIEXPORT jboolean -VerifyClassCodes(JNIEnv *env, jclass cb, char *buffer, jint len) -{ - return VerifyClass(env, cb, buffer, len); -} - -JNIEXPORT jboolean -VerifyClassCodesForMajorVersion(JNIEnv *env, jclass cb, char *buffer, - jint len, jint major_version) -{ - return VerifyClassForMajorVersion(env, cb, buffer, len, major_version); -} diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libverify/check_code.c --- a/src/java.base/share/native/libverify/check_code.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libverify/check_code.c Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,9 +31,6 @@ Exported function: jboolean - VerifyClass(JNIEnv *env, jclass cb, char *message_buffer, - jint buffer_length) - jboolean VerifyClassForMajorVersion(JNIEnv *env, jclass cb, char *message_buffer, jint buffer_length, jint major_version) @@ -910,20 +907,6 @@ return result; } -#define OLD_FORMAT_MAX_MAJOR_VERSION 48 - -JNIEXPORT jboolean -VerifyClass(JNIEnv *env, jclass cb, char *buffer, jint len) -{ - static int warned = 0; - if (!warned) { - jio_fprintf(stdout, "Warning! An old version of jvm is used. This is not supported.\n"); - warned = 1; - } - return VerifyClassForMajorVersion(env, cb, buffer, len, - OLD_FORMAT_MAX_MAJOR_VERSION); -} - static void verify_field(context_type *context, jclass cb, int field_index) { diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libverify/check_format.c --- a/src/java.base/share/native/libverify/check_format.c Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,278 +0,0 @@ -/* - * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include -#include -#include -#include -#include - -#include "jni.h" -#include "jvm.h" - -typedef unsigned short unicode; - -static char * -skip_over_fieldname(char *name, jboolean slash_okay, - unsigned int len); -static char * -skip_over_field_signature(char *name, jboolean void_okay, - unsigned int len); - -/* - * Return non-zero if the character is a valid in JVM class name, zero - * otherwise. The only characters currently disallowed from JVM class - * names are given in the table below: - * - * Character Hex Decimal - * '.' 0x2e 46 - * '/' 0x2f 47 - * ';' 0x3b 59 - * '[' 0x5b 91 - * - * (Method names have further restrictions dealing with the '<' and - * '>' characters.) - */ -static int isJvmIdentifier(unicode ch) { - if( ch > 91 || ch < 46 ) - return 1; /* Lowercase ASCII letters are > 91 */ - else { /* 46 <= ch <= 91 */ - if (ch <= 90 && ch >= 60) { - return 1; /* Uppercase ASCII recognized here */ - } else { /* ch == 91 || 46 <= ch <= 59 */ - if (ch == 91 || ch == 59 || ch <= 47) - return 0; - else - return 1; - } - } -} - -static unicode -next_utf2unicode(char **utfstring_ptr, int * valid) -{ - unsigned char *ptr = (unsigned char *)(*utfstring_ptr); - unsigned char ch, ch2, ch3; - int length = 1; /* default length */ - unicode result = 0x80; /* default bad result; */ - *valid = 1; - switch ((ch = ptr[0]) >> 4) { - default: - result = ch; - break; - - case 0x8: case 0x9: case 0xA: case 0xB: case 0xF: - /* Shouldn't happen. */ - *valid = 0; - break; - - case 0xC: case 0xD: - /* 110xxxxx 10xxxxxx */ - if (((ch2 = ptr[1]) & 0xC0) == 0x80) { - unsigned char high_five = ch & 0x1F; - unsigned char low_six = ch2 & 0x3F; - result = (high_five << 6) + low_six; - length = 2; - } - break; - - case 0xE: - /* 1110xxxx 10xxxxxx 10xxxxxx */ - if (((ch2 = ptr[1]) & 0xC0) == 0x80) { - if (((ch3 = ptr[2]) & 0xC0) == 0x80) { - unsigned char high_four = ch & 0x0f; - unsigned char mid_six = ch2 & 0x3f; - unsigned char low_six = ch3 & 0x3f; - result = (((high_four << 6) + mid_six) << 6) + low_six; - length = 3; - } else { - length = 2; - } - } - break; - } /* end of switch */ - - *utfstring_ptr = (char *)(ptr + length); - return result; -} - -/* Take pointer to a string. Skip over the longest part of the string that - * could be taken as a fieldname. Allow '/' if slash_okay is JNI_TRUE. - * - * Return a pointer to just past the fieldname. Return NULL if no fieldname - * at all was found, or in the case of slash_okay being true, we saw - * consecutive slashes (meaning we were looking for a qualified path but - * found something that was badly-formed). 
- */ -static char * -skip_over_fieldname(char *name, jboolean slash_okay, - unsigned int length) -{ - char *p; - unicode ch; - unicode last_ch = 0; - int valid = 1; - /* last_ch == 0 implies we are looking at the first char. */ - for (p = name; p != name + length; last_ch = ch) { - char *old_p = p; - ch = *p; - if (ch < 128) { - p++; - if (isJvmIdentifier(ch)) { - continue; - } - } else { - char *tmp_p = p; - ch = next_utf2unicode(&tmp_p, &valid); - if (valid == 0) - return 0; - p = tmp_p; - if (isJvmIdentifier(ch)) { - continue; - } - } - - if (slash_okay && ch == '/' && last_ch) { - if (last_ch == '/') { - return 0; /* Don't permit consecutive slashes */ - } - } else if (ch == '_' || ch == '$') { - } else { - return last_ch ? old_p : 0; - } - } - return last_ch ? p : 0; -} - -/* Take pointer to a string. Skip over the longest part of the string that - * could be taken as a field signature. Allow "void" if void_okay. - * - * Return a pointer to just past the signature. Return NULL if no legal - * signature is found. - */ - -static char * -skip_over_field_signature(char *name, jboolean void_okay, - unsigned int length) -{ - unsigned int array_dim = 0; - for (;length > 0;) { - switch (name[0]) { - case JVM_SIGNATURE_VOID: - if (!void_okay) return 0; - /* FALL THROUGH */ - case JVM_SIGNATURE_BOOLEAN: - case JVM_SIGNATURE_BYTE: - case JVM_SIGNATURE_CHAR: - case JVM_SIGNATURE_SHORT: - case JVM_SIGNATURE_INT: - case JVM_SIGNATURE_FLOAT: - case JVM_SIGNATURE_LONG: - case JVM_SIGNATURE_DOUBLE: - return name + 1; - - case JVM_SIGNATURE_CLASS: { - /* Skip over the classname, if one is there. */ - char *p = - skip_over_fieldname(name + 1, JNI_TRUE, --length); - /* The next character better be a semicolon. */ - if (p && p - name - 1 > 0 && p[0] == ';') - return p + 1; - return 0; - } - - case JVM_SIGNATURE_ARRAY: - array_dim++; - /* JVMS 2nd ed. 4.10 */ - /* The number of dimensions in an array is limited to 255 ... */ - if (array_dim > 255) { - return 0; - } - /* The rest of what's there better be a legal signature. */ - name++; - length--; - void_okay = JNI_FALSE; - break; - - default: - return 0; - } - } - return 0; -} - - -/* Used in java/lang/Class.c */ -/* Determine if the specified name is legal - * UTF name for a classname. - * - * Note that this routine expects the internal form of qualified classes: - * the dots should have been replaced by slashes. - */ -JNIEXPORT jboolean -VerifyClassname(char *name, jboolean allowArrayClass) -{ - size_t s = strlen(name); - assert(s <= UINT_MAX); - unsigned int length = (unsigned int)s; - char *p; - - if (length > 0 && name[0] == JVM_SIGNATURE_ARRAY) { - if (!allowArrayClass) { - return JNI_FALSE; - } else { - /* Everything that's left better be a field signature */ - p = skip_over_field_signature(name, JNI_FALSE, length); - } - } else { - /* skip over the fieldname. Slashes are okay */ - p = skip_over_fieldname(name, JNI_TRUE, length); - } - return (p != 0 && p - name == (ptrdiff_t)length); -} - -/* - * Translates '.' to '/'. Returns JNI_TRUE is any / were present. 
- */ -JNIEXPORT jboolean -VerifyFixClassname(char *name) -{ - char *p = name; - jboolean slashesFound = JNI_FALSE; - int valid = 1; - - while (valid != 0 && *p != '\0') { - if (*p == '/') { - slashesFound = JNI_TRUE; - p++; - } else if (*p == '.') { - *p++ = '/'; - } else { - next_utf2unicode(&p, &valid); - } - } - - return slashesFound && valid != 0; -} diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/share/native/libzip/Deflater.c --- a/src/java.base/share/native/libzip/Deflater.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/share/native/libzip/Deflater.c Wed Oct 16 15:31:05 2019 +0200 @@ -257,7 +257,7 @@ res = doDeflate(env, addr, input, inputLen, output + outputOff, outputLen, flush, params); - (*env)->ReleasePrimitiveArrayCritical(env, outputArray, input, 0); + (*env)->ReleasePrimitiveArrayCritical(env, outputArray, output, 0); retVal = checkDeflateStatus(env, addr, inputLen, outputLen, params, res); return retVal; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/unix/native/libjava/jdk_util_md.c --- a/src/java.base/unix/native/libjava/jdk_util_md.c Mon Oct 07 16:48:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#include -#include "jdk_util.h" - -int JDK_InitJvmHandle() { - /* nop */ - return 1; -} - -void* JDK_FindJvmEntry(const char* name) { - return dlsym(RTLD_DEFAULT, name); -} diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/unix/native/libnio/ch/Net.c --- a/src/java.base/unix/native/libnio/ch/Net.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/unix/native/libnio/ch/Net.c Wed Oct 16 15:31:05 2019 +0200 @@ -66,34 +66,6 @@ #endif #endif -#if defined(_AIX) - #ifndef IP_BLOCK_SOURCE - #define IP_BLOCK_SOURCE 58 /* Block data from a given source to a given group */ - #define IP_UNBLOCK_SOURCE 59 /* Unblock data from a given source to a given group */ - #define IP_ADD_SOURCE_MEMBERSHIP 60 /* Join a source-specific group */ - #define IP_DROP_SOURCE_MEMBERSHIP 61 /* Leave a source-specific group */ - #endif - - #ifndef MCAST_BLOCK_SOURCE - #define MCAST_BLOCK_SOURCE 64 - #define MCAST_UNBLOCK_SOURCE 65 - #define MCAST_JOIN_SOURCE_GROUP 66 - #define MCAST_LEAVE_SOURCE_GROUP 67 - - /* This means we're on AIX 5.3 and 'group_source_req' and 'ip_mreq_source' aren't defined as well */ - struct group_source_req { - uint32_t gsr_interface; - struct sockaddr_storage gsr_group; - struct sockaddr_storage gsr_source; - }; - struct ip_mreq_source { - struct in_addr imr_multiaddr; /* IP multicast address of group */ - struct in_addr imr_sourceaddr; /* IP address of source */ - struct in_addr imr_interface; /* local IP address of interface */ - }; - #endif -#endif /* _AIX */ - #define COPY_INET6_ADDRESS(env, source, target) \ (*env)->GetByteArrayRegion(env, source, 0, 16, target) diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/windows/classes/java/lang/ProcessImpl.java --- a/src/java.base/windows/classes/java/lang/ProcessImpl.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/windows/classes/java/lang/ProcessImpl.java Wed Oct 16 15:31:05 2019 +0200 @@ -38,6 +38,7 @@ import java.security.AccessController; import java.security.PrivilegedAction; import java.util.ArrayList; +import java.util.Locale; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; @@ -46,6 +47,8 @@ import jdk.internal.access.JavaIOFileDescriptorAccess; import jdk.internal.access.SharedSecrets; import jdk.internal.ref.CleanerFactory; +import sun.security.action.GetBooleanAction; +import sun.security.action.GetPropertyAction; /* This class is for the exclusive use of ProcessBuilder.start() to * create new processes. @@ -209,12 +212,15 @@ private static final int VERIFICATION_CMD_BAT = 0; private static final int VERIFICATION_WIN32 = 1; - private static final int VERIFICATION_LEGACY = 2; + private static final int VERIFICATION_WIN32_SAFE = 2; // inside quotes not allowed + private static final int VERIFICATION_LEGACY = 3; + // See Command shell overview for documentation of special characters. + // https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-xp/bb490954(v=technet.10) private static final char ESCAPE_VERIFICATION[][] = { // We guarantee the only command file execution for implicit [cmd.exe] run. 
// http://technet.microsoft.com/en-us/library/bb490954.aspx {' ', '\t', '<', '>', '&', '|', '^'}, - + {' ', '\t', '<', '>'}, {' ', '\t', '<', '>'}, {' ', '\t'} }; @@ -231,8 +237,25 @@ cmdbuf.append(' '); String s = cmd[i]; if (needsEscaping(verificationType, s)) { - cmdbuf.append('"').append(s); + cmdbuf.append('"'); + if (verificationType == VERIFICATION_WIN32_SAFE) { + // Insert the argument, adding '\' to quote any interior quotes + int length = s.length(); + for (int j = 0; j < length; j++) { + char c = s.charAt(j); + if (c == DOUBLEQUOTE) { + int count = countLeadingBackslash(verificationType, s, j); + while (count-- > 0) { + cmdbuf.append(BACKSLASH); // double the number of backslashes + } + cmdbuf.append(BACKSLASH); // backslash to quote the quote + } + cmdbuf.append(c); + } + } else { + cmdbuf.append(s); + } // The code protects the [java.exe] and console command line // parser, that interprets the [\"] combination as an escape // sequence for the ["] char. @@ -245,8 +268,9 @@ // command line parser. The case of the [""] tail escape // sequence could not be realized due to the argument validation // procedure. - if ((verificationType != VERIFICATION_CMD_BAT) && s.endsWith("\\")) { - cmdbuf.append('\\'); + int count = countLeadingBackslash(verificationType, s, s.length()); + while (count-- > 0) { + cmdbuf.append(BACKSLASH); // double the number of backslashes } cmdbuf.append('"'); } else { @@ -256,26 +280,16 @@ return cmdbuf.toString(); } - private static boolean isQuoted(boolean noQuotesInside, String arg, - String errorMessage) { - int lastPos = arg.length() - 1; - if (lastPos >=1 && arg.charAt(0) == '"' && arg.charAt(lastPos) == '"') { - // The argument has already been quoted. - if (noQuotesInside) { - if (arg.indexOf('"', 1) != lastPos) { - // There is ["] inside. - throw new IllegalArgumentException(errorMessage); - } - } - return true; - } - if (noQuotesInside) { - if (arg.indexOf('"') >= 0) { - // There is ["] inside. - throw new IllegalArgumentException(errorMessage); - } - } - return false; + /** + * Return the argument without quotes (1st and last) if present, else the arg. + * @param str a string + * @return the string without 1st and last quotes + */ + private static String unQuote(String str) { + int len = str.length(); + return (len >= 2 && str.charAt(0) == DOUBLEQUOTE && str.charAt(len - 1) == DOUBLEQUOTE) + ? str.substring(1, len - 1) + : str; } private static boolean needsEscaping(int verificationType, String arg) { @@ -286,9 +300,26 @@ // For [.exe] or [.com] file the unpaired/internal ["] // in the argument is not a problem. 
- boolean argIsQuoted = isQuoted( - (verificationType == VERIFICATION_CMD_BAT), - arg, "Argument has embedded quote, use the explicit CMD.EXE call."); + String unquotedArg = unQuote(arg); + boolean argIsQuoted = !arg.equals(unquotedArg); + boolean embeddedQuote = unquotedArg.indexOf(DOUBLEQUOTE) >= 0; + + switch (verificationType) { + case VERIFICATION_CMD_BAT: + if (embeddedQuote) { + throw new IllegalArgumentException("Argument has embedded quote, " + + "use the explicit CMD.EXE call."); + } + break; // break determine whether to quote + case VERIFICATION_WIN32_SAFE: + if (argIsQuoted && embeddedQuote) { + throw new IllegalArgumentException("Malformed argument has embedded quote: " + + unquotedArg); + } + break; + default: + break; + } if (!argIsQuoted) { char testEscape[] = ESCAPE_VERIFICATION[verificationType]; @@ -304,13 +335,13 @@ private static String getExecutablePath(String path) throws IOException { - boolean pathIsQuoted = isQuoted(true, path, - "Executable name has embedded quote, split the arguments"); - + String name = unQuote(path); + if (name.indexOf(DOUBLEQUOTE) >= 0) { + throw new IllegalArgumentException("Executable name has embedded quote, " + + "split the arguments: " + name); + } // Win32 CreateProcess requires path to be normalized - File fileToRun = new File(pathIsQuoted - ? path.substring(1, path.length() - 1) - : path); + File fileToRun = new File(name); // From the [CreateProcess] function documentation: // @@ -325,13 +356,26 @@ // sequence:..." // // In practice ANY non-existent path is extended by [.exe] extension - // in the [CreateProcess] funcion with the only exception: + // in the [CreateProcess] function with the only exception: // the path ends by (.) return fileToRun.getPath(); } + /** + * An executable is any program that is an EXE or does not have an extension + * and the Windows createProcess will be looking for .exe. + * The comparison is case insensitive based on the name. + * @param executablePath the executable file + * @return true if the path ends in .exe or does not have an extension. + */ + private boolean isExe(String executablePath) { + File file = new File(executablePath); + String upName = file.getName().toUpperCase(Locale.ROOT); + return (upName.endsWith(".EXE") || upName.indexOf('.') < 0); + } + // Old version that can be bypassed private boolean isShellFile(String executablePath) { String upPath = executablePath.toUpperCase(); return (upPath.endsWith(".CMD") || upPath.endsWith(".BAT")); @@ -342,6 +386,21 @@ return argbuf.append('"').append(arg).append('"').toString(); } + // Count backslashes before start index of string. 
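The createCommandLine changes above implement the Win32 C runtime quoting rule for the new VERIFICATION_WIN32_SAFE mode: a run of backslashes immediately preceding a double quote, including the closing quote added around the argument, is doubled, and an interior quote gets one extra escaping backslash. A self-contained sketch of that rule (a hypothetical helper, not the patch's code, which does the same work inline via countLeadingBackslash):

    // Hypothetical helper illustrating the quoting rule: backslash runs
    // immediately before a quote are doubled, an interior quote gets one
    // extra escaping backslash, and a trailing run is doubled before the
    // closing quote.
    public class Win32Quote {
        static String quote(String arg) {
            StringBuilder sb = new StringBuilder("\"");
            int backslashes = 0;
            for (int i = 0; i < arg.length(); i++) {
                char c = arg.charAt(i);
                if (c == '\\') {
                    backslashes++;
                } else {
                    if (c == '"') {
                        // double the run, then add one backslash for the quote
                        for (int k = 0; k <= backslashes; k++) {
                            sb.append('\\');
                        }
                    }
                    backslashes = 0;
                }
                sb.append(c);
            }
            for (int k = 0; k < backslashes; k++) {
                sb.append('\\'); // double a trailing run before the closing quote
            }
            return sb.append('"').toString();
        }

        public static void main(String[] args) {
            System.out.println(quote("say \"hi\""));           // "say \"hi\""
            System.out.println(quote("trailing backslash\\")); // run doubled at end
        }
    }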
+ // .bat files don't include backslashes as part of the quote + private static int countLeadingBackslash(int verificationType, + CharSequence input, int start) { + if (verificationType == VERIFICATION_CMD_BAT) + return 0; + int j; + for (j = start - 1; j >= 0 && input.charAt(j) == BACKSLASH; j--) { + // just scanning backwards + } + return (start - 1) - j; // number of BACKSLASHES + } + + private static final char DOUBLEQUOTE = '\"'; + private static final char BACKSLASH = '\\'; private final long handle; private final ProcessHandle processHandle; @@ -358,15 +417,13 @@ throws IOException { String cmdstr; - SecurityManager security = System.getSecurityManager(); - boolean allowAmbiguousCommands = false; - if (security == null) { - allowAmbiguousCommands = true; - String value = System.getProperty("jdk.lang.Process.allowAmbiguousCommands"); - if (value != null) - allowAmbiguousCommands = !"false".equalsIgnoreCase(value); - } - if (allowAmbiguousCommands) { + final SecurityManager security = System.getSecurityManager(); + final String value = GetPropertyAction. + privilegedGetProperty("jdk.lang.Process.allowAmbiguousCommands", + (security == null ? "true" : "false")); + final boolean allowAmbiguousCommands = !"false".equalsIgnoreCase(value); + + if (allowAmbiguousCommands && security == null) { // Legacy mode. // Normalize path if possible. @@ -413,11 +470,12 @@ // Quotation protects from interpretation of the [path] argument as // start of longer path with spaces. Quotation has no influence to // [.exe] extension heuristic. + boolean isShell = allowAmbiguousCommands ? isShellFile(executablePath) + : !isExe(executablePath); cmdstr = createCommandLine( - // We need the extended verification procedure for CMD files. - isShellFile(executablePath) - ? VERIFICATION_CMD_BAT - : VERIFICATION_WIN32, + // We need the extended verification procedures + isShell ? VERIFICATION_CMD_BAT + : (allowAmbiguousCommands ? VERIFICATION_WIN32 : VERIFICATION_WIN32_SAFE), quoteString(executablePath), cmd); } diff -r 54c1ba464b78 -r 28c7e6711871 src/java.base/windows/native/libjava/jdk_util_md.c --- a/src/java.base/windows/native/libjava/jdk_util_md.c Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.base/windows/native/libjava/jdk_util_md.c Wed Oct 16 15:31:05 2019 +0200 @@ -28,17 +28,6 @@ #define JVM_DLL "jvm.dll" -static HMODULE jvm_handle = NULL; - -int JDK_InitJvmHandle() { - jvm_handle = GetModuleHandle(JVM_DLL); - return (jvm_handle != NULL); -} - -void* JDK_FindJvmEntry(const char* name) { - return (void*) GetProcAddress(jvm_handle, name); -} - JNIEXPORT HMODULE JDK_LoadSystemLibrary(const char* name) { HMODULE handle = NULL; char path[MAX_PATH]; diff -r 54c1ba464b78 -r 28c7e6711871 src/java.datatransfer/share/classes/java/awt/datatransfer/MimeType.java --- a/src/java.datatransfer/share/classes/java/awt/datatransfer/MimeType.java Mon Oct 07 16:48:42 2019 +0200 +++ b/src/java.datatransfer/share/classes/java/awt/datatransfer/MimeType.java Wed Oct 16 15:31:05 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
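The rewritten start of ProcessImpl.create above reads jdk.lang.Process.allowAmbiguousCommands through GetPropertyAction, defaulting to true without a SecurityManager and false with one; only an explicit "false" (case-insensitive) disables the legacy behavior, routing non-shell commands through the stricter VERIFICATION_WIN32_SAFE quoting. A Windows-only usage sketch opting into the strict path:

    import java.io.IOException;

    // Windows-only sketch: the property is read on each start, so setting it
    // programmatically before launching works, though the command line
    // (-Djdk.lang.Process.allowAmbiguousCommands=false) is the usual place.
    public class StrictLaunch {
        public static void main(String[] args) throws IOException {
            System.setProperty("jdk.lang.Process.allowAmbiguousCommands", "false");
            ProcessBuilder pb = new ProcessBuilder(
                    "cmd.exe", "/c", "echo", "plain-argument");
            pb.inheritIO().start(); // cmd.exe is an .exe, so WIN32_SAFE applies
        }
    }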
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/macosx/classes/sun/awt/CGraphicsDevice.java
--- a/src/java.desktop/macosx/classes/sun/awt/CGraphicsDevice.java	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/macosx/classes/sun/awt/CGraphicsDevice.java	Wed Oct 16 15:31:05 2019 +0200
@@ -61,6 +61,10 @@
     public CGraphicsDevice(final int displayID) {
         this.displayID = displayID;
         config = CGLGraphicsConfig.getConfig(this, displayID, 0);
+        // initializes default device state, might be redundant step since we
+        // call "displayChanged()" later anyway, but we do not want to leave the
+        // device in an inconsistent state after construction
+        displayChanged();
     }
 
     /**
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/macosx/classes/sun/java2d/opengl/CGLSurfaceData.java
--- a/src/java.desktop/macosx/classes/sun/java2d/opengl/CGLSurfaceData.java	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/macosx/classes/sun/java2d/opengl/CGLSurfaceData.java	Wed Oct 16 15:31:05 2019 +0200
@@ -25,7 +25,6 @@
 
 package sun.java2d.opengl;
 
-import java.awt.Graphics;
 import java.awt.GraphicsConfiguration;
 import java.awt.GraphicsDevice;
 import java.awt.GraphicsEnvironment;
@@ -33,9 +32,7 @@
 import java.awt.Rectangle;
 import java.awt.image.ColorModel;
 
-import sun.java2d.SunGraphics2D;
 import sun.java2d.SurfaceData;
-
 import sun.lwawt.macosx.CPlatformView;
 
 public abstract class CGLSurfaceData extends OGLSurfaceData {
@@ -342,43 +339,4 @@
             return offscreenImage;
         }
     }
-
-    // Mac OS X specific APIs for JOGL/Java2D bridge...
-
-    // given a surface create and attach GL context, then return it
-    private static native long createCGLContextOnSurface(CGLSurfaceData sd,
-                                                         long sharedContext);
-
-    public static long createOGLContextOnSurface(Graphics g, long sharedContext) {
-        SurfaceData sd = ((SunGraphics2D) g).surfaceData;
-        if ((sd instanceof CGLSurfaceData) == true) {
-            CGLSurfaceData cglsd = (CGLSurfaceData) sd;
-            return createCGLContextOnSurface(cglsd, sharedContext);
-        } else {
-            return 0L;
-        }
-    }
-
-    // returns whether or not the makeCurrent operation succeeded
-    static native boolean makeCGLContextCurrentOnSurface(CGLSurfaceData sd,
-                                                         long ctx);
-
-    public static boolean makeOGLContextCurrentOnSurface(Graphics g, long ctx) {
-        SurfaceData sd = ((SunGraphics2D) g).surfaceData;
-        if ((ctx != 0L) && ((sd instanceof CGLSurfaceData) == true)) {
-            CGLSurfaceData cglsd = (CGLSurfaceData) sd;
-            return makeCGLContextCurrentOnSurface(cglsd, ctx);
-        } else {
-            return false;
-        }
-    }
-
-    // additional cleanup
-    private static native void destroyCGLContext(long ctx);
-
-    public static void destroyOGLContext(long ctx) {
-        if (ctx != 0L) {
-            destroyCGLContext(ctx);
-        }
-    }
 }
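The CGraphicsDevice constructor now refreshes device state eagerly, so no caller can observe the device before its first displayChanged() callback arrives. A generic sketch of this construction pattern, with hypothetical names:

    // Sketch only: eager state initialization in the constructor.
    public class Device {
        private int widthMm = -1;   // -1 would mean "never initialized"

        public Device() {
            // possibly redundant if an update callback fires right afterwards,
            // but it guarantees getters never see the placeholder value
            displayChanged();
        }

        public void displayChanged() {
            widthMm = queryCurrentWidth();
        }

        private int queryCurrentWidth() {
            return 300; // stand-in for a native query
        }

        public int getWidthMm() { return widthMm; }

        public static void main(String[] args) {
            System.out.println(new Device().getWidthMm()); // 300, never -1
        }
    }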
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/macosx/classes/sun/lwawt/macosx/LWCToolkit.java
--- a/src/java.desktop/macosx/classes/sun/lwawt/macosx/LWCToolkit.java	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/macosx/classes/sun/lwawt/macosx/LWCToolkit.java	Wed Oct 16 15:31:05 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -444,6 +444,7 @@
         fontHints.put(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_LCD_HRGB);
         desktopProperties.put(SunToolkit.DESKTOPFONTHINTS, fontHints);
         desktopProperties.put("awt.mouse.numButtons", BUTTONS);
+        desktopProperties.put("awt.multiClickInterval", getMultiClickTime());
 
         // These DnD properties must be set, otherwise Swing ends up spewing NPEs
         // all over the place. The values came straight off of MToolkit.
@@ -538,6 +539,11 @@
         return BUTTONS;
     }
 
+    /**
+     * Returns the double-click time interval in ms.
+     */
+    private static native int getMultiClickTime();
+
     @Override
     public boolean isTraySupported() {
         return true;
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/macosx/native/libawt_lwawt/awt/LWCToolkit.m
--- a/src/java.desktop/macosx/native/libawt_lwawt/awt/LWCToolkit.m	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/macosx/native/libawt_lwawt/awt/LWCToolkit.m	Wed Oct 16 15:31:05 2019 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -831,3 +831,19 @@
     }
     return JNI_FALSE;
 }
+
+/*
+ * Class:     sun_lwawt_macosx_LWCToolkit
+ * Method:    getMultiClickTime
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL
+Java_sun_lwawt_macosx_LWCToolkit_getMultiClickTime(JNIEnv *env, jclass klass) {
+    __block jint multiClickTime = 0;
+    JNF_COCOA_ENTER(env);
+    [JNFRunLoop performOnMainThreadWaiting:YES withBlock:^(){
+        multiClickTime = (jint)([NSEvent doubleClickInterval] * 1000);
+    }];
+    JNF_COCOA_EXIT(env);
+    return multiClickTime;
+}
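The awt.multiClickInterval value published above is reachable through the public Toolkit.getDesktopProperty API. A possible client-side lookup (the 500 ms fallback is an arbitrary assumption, since the property is platform-dependent and may be absent):

    // Sketch only: reading the new desktop property with a fallback.
    import java.awt.Toolkit;

    public class MultiClick {
        public static void main(String[] args) {
            Object v = Toolkit.getDefaultToolkit()
                              .getDesktopProperty("awt.multiClickInterval");
            int intervalMs = (v instanceof Integer) ? (Integer) v : 500;
            System.out.println("double-click interval: " + intervalMs + " ms");
        }
    }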
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/macosx/native/libawt_lwawt/java2d/opengl/CGLGraphicsConfig.m
--- a/src/java.desktop/macosx/native/libawt_lwawt/java2d/opengl/CGLGraphicsConfig.m	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/macosx/native/libawt_lwawt/java2d/opengl/CGLGraphicsConfig.m	Wed Oct 16 15:31:05 2019 +0200
@@ -152,7 +152,6 @@
     AWT_ASSERT_APPKIT_THREAD;
 
     jint displayID = (jint)[(NSNumber *)[argValue objectAtIndex: 0] intValue];
-    jint pixfmt = (jint)[(NSNumber *)[argValue objectAtIndex: 1] intValue];
     jint swapInterval = (jint)[(NSNumber *)[argValue objectAtIndex: 2] intValue];
     JNIEnv *env = [ThreadUtilities getJNIEnvUncached];
     [argValue removeAllObjects];
@@ -161,11 +160,7 @@
 
     NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
 
-    CGOpenGLDisplayMask glMask = (CGOpenGLDisplayMask)pixfmt;
     if (sharedContext == NULL) {
-        if (glMask == 0) {
-            glMask = CGDisplayIDToOpenGLDisplayMask(displayID);
-        }
 
         NSOpenGLPixelFormatAttribute attrs[] = {
             NSOpenGLPFAAllowOfflineRenderers,
@@ -176,16 +171,17 @@
             NSOpenGLPFAColorSize, 32,
             NSOpenGLPFAAlphaSize, 8,
             NSOpenGLPFADepthSize, 16,
-            NSOpenGLPFAScreenMask, glMask,
             0
         };
         sharedPixelFormat =
             [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs];
         if (sharedPixelFormat == nil) {
-            J2dRlsTraceLn(J2D_TRACE_ERROR, "CGLGraphicsConfig_getCGLConfigInfo: shared NSOpenGLPixelFormat is NULL");
-            [argValue addObject: [NSNumber numberWithLong: 0L]];
-            return;
+            J2dRlsTraceLn(J2D_TRACE_ERROR,
+                "CGLGraphicsConfig_getCGLConfigInfo: shared NSOpenGLPixelFormat is NULL");
+
+            [argValue addObject: [NSNumber numberWithLong: 0L]];
+            return;
         }
 
         sharedContext =
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/share/classes/java/awt/Font.java
--- a/src/java.desktop/share/classes/java/awt/Font.java	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/share/classes/java/awt/Font.java	Wed Oct 16 15:31:05 2019 +0200
@@ -1929,6 +1929,7 @@
         // value is the default.
 
         if (fRequestedAttributes != null) {
+            try {
             values = getAttributeValues(); // init
             AttributeValues extras =
                 AttributeValues.fromSerializableHashtable(fRequestedAttributes);
@@ -1938,10 +1939,13 @@
             values = getAttributeValues().merge(extras);
             this.nonIdentityTx = values.anyNonDefault(EXTRA_MASK);
             this.hasLayoutAttributes =  values.anyNonDefault(LAYOUT_MASK);
-
+            } catch (Throwable t) {
+                throw new IOException(t);
+            } finally {
             fRequestedAttributes = null; // don't need it any more
         }
     }
+    }
 
     /**
      * Returns the number of glyphs in this {@code Font}.  Glyph codes
diff -r 54c1ba464b78 -r 28c7e6711871 src/java.desktop/share/classes/java/awt/doc-files/AWTThreadIssues.html
--- a/src/java.desktop/share/classes/java/awt/doc-files/AWTThreadIssues.html	Mon Oct 07 16:48:42 2019 +0200
+++ b/src/java.desktop/share/classes/java/awt/doc-files/AWTThreadIssues.html	Wed Oct 16 15:31:05 2019 +0200
@@ -5,7 +5,7 @@
 AWT Threading Issues