# HG changeset patch # User herrick # Date 1574098806 18000 # Node ID 4ad81e9e30fdf98428432f3ca58c345d65012d49 # Parent 4d58a35f3cfa5caaa64a83c26c682ece0d2f131c# Parent 5ac4a49f539939f0dc659a25cbc256eb6d801392 merge diff -r 4d58a35f3cfa -r 4ad81e9e30fd .hgtags --- a/.hgtags Wed Nov 13 17:21:31 2019 -0500 +++ b/.hgtags Mon Nov 18 12:40:06 2019 -0500 @@ -595,3 +595,4 @@ 54ffb15c48399dd59922ee22bb592d815307e77c jdk-14+20 c16ac7a2eba4e73cb4f7ee9294dd647860eebff0 jdk-14+21 83810b7d12e7ff761ad3dd91f323a22dad96f108 jdk-14+22 +15936b142f86731afa4b1a2c0fe4a01e806c4944 jdk-14+23 diff -r 4d58a35f3cfa -r 4ad81e9e30fd make/RunTests.gmk --- a/make/RunTests.gmk Wed Nov 13 17:21:31 2019 -0500 +++ b/make/RunTests.gmk Mon Nov 18 12:40:06 2019 -0500 @@ -247,11 +247,29 @@ CORES_DIVIDER := 4 endif endif + # For some big multi-core machines with low ulimit -u setting we hit the max + # threads/process limit. In such a setup the memory/cores-only-guided + # TEST_JOBS config is insufficient. From experience a concurrency setting of + # 14 works reasonably well for low ulimit values (<= 4096). Thus, use + # divider 4096/14. For high ulimit -u values this shouldn't make a difference. + ULIMIT_DIVIDER := (4096/14) + PROC_ULIMIT := -1 + ifneq ($(OPENJDK_TARGET_OS), windows) + PROC_ULIMIT := $(shell $(ULIMIT) -u) + ifeq ($(PROC_ULIMIT), unlimited) + PROC_ULIMIT := -1 + endif + endif MEMORY_DIVIDER := 2048 TEST_JOBS := $(shell $(AWK) \ 'BEGIN { \ c = $(NUM_CORES) / $(CORES_DIVIDER); \ m = $(MEMORY_SIZE) / $(MEMORY_DIVIDER); \ + u = $(PROC_ULIMIT); \ + if (u > -1) { \ + u = u / $(ULIMIT_DIVIDER); \ + if (u < c) c = u; \ + } \ if (c > m) c = m; \ c = c * $(TEST_JOBS_FACTOR); \ c = c * $(TEST_JOBS_FACTOR_JDL); \ diff -r 4d58a35f3cfa -r 4ad81e9e30fd make/RunTestsPrebuiltSpec.gmk --- a/make/RunTestsPrebuiltSpec.gmk Wed Nov 13 17:21:31 2019 -0500 +++ b/make/RunTestsPrebuiltSpec.gmk Mon Nov 18 12:40:06 2019 -0500 @@ -175,6 +175,7 @@ EXPR := expr FILE := file HG := hg +ULIMIT := ulimit # On Solaris gnu versions of some tools are required. ifeq ($(OPENJDK_BUILD_OS), solaris) diff -r 4d58a35f3cfa -r 4ad81e9e30fd make/autoconf/basics.m4 --- a/make/autoconf/basics.m4 Wed Nov 13 17:21:31 2019 -0500 +++ b/make/autoconf/basics.m4 Mon Nov 18 12:40:06 2019 -0500 @@ -574,6 +574,26 @@ ]) ############################################################################### +# Like BASIC_REQUIRE_PROGS but also allows for bash built-ins +# $1: variable to set +# $2: executable name (or list of names) to look for +# $3: [path] +AC_DEFUN([BASIC_REQUIRE_BUILTIN_PROGS], +[ + BASIC_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)]) + if test "x[$]$1" = x; then + AC_MSG_NOTICE([Required tool $2 not found in PATH, checking built-in]) + if help $2 > /dev/null 2>&1; then + AC_MSG_NOTICE([Found $2 as shell built-in. Using it]) + $1="$2" + else + AC_MSG_ERROR([Required tool $2 also not found as built-in.]) + fi + fi + BASIC_CHECK_NONEMPTY($1) +]) + +############################################################################### # Setup the most fundamental tools that relies on not much else to set up, # but is used by much of the early bootstrap code. AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], @@ -1284,6 +1304,9 @@ elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then BASIC_REQUIRE_PROGS(ELFEDIT, elfedit) fi + if ! 
test "x$OPENJDK_TARGET_OS" = "xwindows"; then + BASIC_REQUIRE_BUILTIN_PROGS(ULIMIT, ulimit) + fi ]) ############################################################################### diff -r 4d58a35f3cfa -r 4ad81e9e30fd make/autoconf/spec.gmk.in --- a/make/autoconf/spec.gmk.in Wed Nov 13 17:21:31 2019 -0500 +++ b/make/autoconf/spec.gmk.in Mon Nov 18 12:40:06 2019 -0500 @@ -767,6 +767,7 @@ XCODEBUILD=@XCODEBUILD@ DTRACE := @DTRACE@ FIXPATH:=@FIXPATH@ +ULIMIT:=@ULIMIT@ TAR_TYPE:=@TAR_TYPE@ TAR_CREATE_EXTRA_PARAM:=@TAR_CREATE_EXTRA_PARAM@ diff -r 4d58a35f3cfa -r 4ad81e9e30fd make/data/cacerts/luxtrustglobalroot2ca --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/cacerts/luxtrustglobalroot2ca Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,40 @@ +Owner: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU +Issuer: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU +Serial number: a7ea6df4b449eda6a24859ee6b815d3167fbbb1 +Valid from: Thu Mar 05 13:21:57 GMT 2015 until: Mon Mar 05 13:21:57 GMT 2035 +Signature algorithm name: SHA256withRSA +Subject Public Key Algorithm: 4096-bit RSA key +Version: 3 +-----BEGIN CERTIFICATE----- +MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL +BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV +BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw +MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B +LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F +ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem +hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1 +EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn +Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4 +zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ +96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m +j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g +DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+ +8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j +X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH +hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB +KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0 +Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT ++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL +BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9 +BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO +jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9 +loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c +qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+ +2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/ +JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre +zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf +LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+ +x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6 +oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr +-----END CERTIFICATE----- diff -r 4d58a35f3cfa -r 4ad81e9e30fd make/hotspot/lib/CompileJvm.gmk --- a/make/hotspot/lib/CompileJvm.gmk Wed Nov 13 17:21:31 2019 -0500 +++ b/make/hotspot/lib/CompileJvm.gmk Mon Nov 18 12:40:06 2019 -0500 @@ -57,7 +57,7 @@ JVM_EXCLUDE_FILES += args.cc JVM_EXCLUDES += adlc -# Needed by vm_version.cpp +# Needed by abstract_vm_version.cpp ifeq ($(call 
isTargetCpu, x86_64), true) OPENJDK_TARGET_CPU_VM_VERSION := amd64 else ifeq ($(call isTargetCpu, sparcv9), true) @@ -183,7 +183,7 @@ EXCLUDE_PATTERNS := $(JVM_EXCLUDE_PATTERNS), \ EXTRA_OBJECT_FILES := $(DTRACE_EXTRA_OBJECT_FILES), \ CFLAGS := $(JVM_CFLAGS), \ - vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \ + abstract_vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \ arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \ DISABLED_WARNINGS_gcc := $(DISABLED_WARNINGS_gcc), \ DISABLED_WARNINGS_clang := $(DISABLED_WARNINGS_clang), \ @@ -206,11 +206,11 @@ DEFINE_THIS_FILE := false, \ )) -# Always recompile vm_version.cpp if libjvm needs to be relinked. This ensures +# Always recompile abstract_vm_version.cpp if libjvm needs to be relinked. This ensures # that the internal vm version is updated as it relies on __DATE__ and __TIME__ # macros. -VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/vm_version$(OBJ_SUFFIX) -$(VM_VERSION_OBJ): $(filter-out $(VM_VERSION_OBJ) $(JVM_MAPFILE), \ +ABSTRACT_VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/abstract_vm_version$(OBJ_SUFFIX) +$(ABSTRACT_VM_VERSION_OBJ): $(filter-out $(ABSTRACT_VM_VERSION_OBJ) $(JVM_MAPFILE), \ $(BUILD_LIBJVM_TARGET_DEPS)) ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true) diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/aarch64/vm_version_aarch64.cpp --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -30,8 +30,8 @@ #include "runtime/java.hpp" #include "runtime/os.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_aarch64.hpp" #include OS_HEADER_INLINE(os) diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/aarch64/vm_version_aarch64.hpp --- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,8 +26,8 @@ #ifndef CPU_AARCH64_VM_VERSION_AARCH64_HPP #define CPU_AARCH64_VM_VERSION_AARCH64_HPP +#include "runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" #include "utilities/sizes.hpp" class VM_Version : public Abstract_VM_Version { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/aarch64/vm_version_ext_aarch64.hpp --- a/src/hotspot/cpu/aarch64/vm_version_ext_aarch64.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/aarch64/vm_version_ext_aarch64.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_AARCH64_VM_VERSION_EXT_AARCH64_HPP #define CPU_AARCH64_VM_VERSION_EXT_AARCH64_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_aarch64.hpp" class VM_Version_Ext : public VM_Version { private: diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/arm/register_arm.hpp --- a/src/hotspot/cpu/arm/register_arm.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/arm/register_arm.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,7 +26,7 @@ #define CPU_ARM_REGISTER_ARM_HPP #include "asm/register.hpp" -#include "vm_version_arm.hpp" +#include "runtime/vm_version.hpp" class VMRegImpl; typedef VMRegImpl* VMReg; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/arm/vm_version_arm.hpp --- a/src/hotspot/cpu/arm/vm_version_arm.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/arm/vm_version_arm.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_ARM_VM_VERSION_ARM_HPP #define CPU_ARM_VM_VERSION_ARM_HPP +#include 
"runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" class VM_Version: public Abstract_VM_Version { friend class JVMCIVMStructs; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/arm/vm_version_arm_32.cpp --- a/src/hotspot/cpu/arm/vm_version_arm_32.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/arm/vm_version_arm_32.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "runtime/java.hpp" #include "runtime/os.inline.hpp" #include "runtime/stubCodeGenerator.hpp" -#include "vm_version_arm.hpp" +#include "runtime/vm_version.hpp" int VM_Version::_stored_pc_adjustment = 4; int VM_Version::_arm_arch = 5; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/arm/vm_version_ext_arm.hpp --- a/src/hotspot/cpu/arm/vm_version_ext_arm.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/arm/vm_version_ext_arm.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_ARM_VM_VERSION_EXT_ARM_HPP #define CPU_ARM_VM_VERSION_EXT_ARM_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_arm.hpp" class VM_Version_Ext : public VM_Version { private: diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp --- a/src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_PPC_VM_VERSION_EXT_PPC_HPP #define CPU_PPC_VM_VERSION_EXT_PPC_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_ppc.hpp" #define CPU_INFO "cpu_info" #define CPU_TYPE "fpu_type" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/ppc/vm_version_ppc.cpp --- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -32,10 +32,10 @@ #include "runtime/java.hpp" #include "runtime/os.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/vm_version.hpp" #include "utilities/align.hpp" #include "utilities/defaultStream.hpp" #include "utilities/globalDefinitions.hpp" -#include "vm_version_ppc.hpp" #include #if defined(_AIX) diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/ppc/vm_version_ppc.hpp --- a/src/hotspot/cpu/ppc/vm_version_ppc.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/ppc/vm_version_ppc.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,8 +26,8 @@ #ifndef CPU_PPC_VM_VERSION_PPC_HPP #define CPU_PPC_VM_VERSION_PPC_HPP +#include "runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" class VM_Version: public Abstract_VM_Version { protected: diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/s390/register_s390.hpp --- a/src/hotspot/cpu/s390/register_s390.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/s390/register_s390.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2017 SAP SE. All rights reserved. + * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #define CPU_S390_REGISTER_S390_HPP #include "asm/register.hpp" -#include "vm_version_s390.hpp" +#include "runtime/vm_version.hpp" class Address; class VMRegImpl; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/s390/vm_version_ext_s390.hpp --- a/src/hotspot/cpu/s390/vm_version_ext_s390.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/s390/vm_version_ext_s390.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_S390_VM_VERSION_EXT_S390_HPP #define CPU_S390_VM_VERSION_EXT_S390_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_s390.hpp" #define CPU_INFO "cpu_info" #define CPU_TYPE "fpu_type" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/s390/vm_version_s390.cpp --- a/src/hotspot/cpu/s390/vm_version_s390.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/s390/vm_version_s390.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -31,7 +31,7 @@ #include "memory/resourceArea.hpp" #include "runtime/java.hpp" #include "runtime/stubCodeGenerator.hpp" -#include "vm_version_s390.hpp" +#include "runtime/vm_version.hpp" # include @@ -44,8 +44,8 @@ unsigned int VM_Version::_nfeatures = 0; unsigned int VM_Version::_ncipher_features = 0; unsigned int VM_Version::_nmsgdigest_features = 0; -unsigned int VM_Version::_Dcache_lineSize = 256; -unsigned int VM_Version::_Icache_lineSize = 256; +unsigned int VM_Version::_Dcache_lineSize = DEFAULT_CACHE_LINE_SIZE; +unsigned int VM_Version::_Icache_lineSize = DEFAULT_CACHE_LINE_SIZE; static const char* z_gen[] = {" ", "G1", "G2", "G3", "G4", "G5", "G6", "G7" }; static const char* z_machine[] = {" ", "2064", "2084", "2094", "2097", "2817", " ", "2964" }; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/s390/vm_version_s390.hpp --- a/src/hotspot/cpu/s390/vm_version_s390.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/s390/vm_version_s390.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2018 SAP SE. All rights reserved. + * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,8 +27,8 @@ #define CPU_S390_VM_VERSION_S390_HPP +#include "runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" class VM_Version: public Abstract_VM_Version { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp --- a/src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_SPARC_VM_VERSION_EXT_SPARC_HPP #define CPU_SPARC_VM_VERSION_EXT_SPARC_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_sparc.hpp" #if defined(SOLARIS) #include diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/sparc/vm_version_sparc.cpp --- a/src/hotspot/cpu/sparc/vm_version_sparc.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/sparc/vm_version_sparc.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ #include "runtime/java.hpp" #include "runtime/os.hpp" #include "runtime/stubCodeGenerator.hpp" -#include "vm_version_sparc.hpp" +#include "runtime/vm_version.hpp" #include diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/sparc/vm_version_sparc.hpp --- a/src/hotspot/cpu/sparc/vm_version_sparc.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/sparc/vm_version_sparc.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_SPARC_VM_VERSION_SPARC_HPP #define CPU_SPARC_VM_VERSION_SPARC_HPP +#include "runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" class VM_Version: public Abstract_VM_Version { friend class VMStructs; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/x86/assembler_x86.hpp --- a/src/hotspot/cpu/x86/assembler_x86.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/x86/assembler_x86.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,7 +26,7 @@ #define CPU_X86_ASSEMBLER_X86_HPP #include "asm/register.hpp" -#include "vm_version_x86.hpp" +#include "runtime/vm_version.hpp" class BiasedLockingCounters; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp --- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -38,6 +38,7 @@ #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" +#include "runtime/vm_version.hpp" #include "utilities/align.hpp" #include "vmreg_x86.inline.hpp" #ifdef COMPILER1 @@ -46,7 +47,6 @@ #ifdef COMPILER2 #include "opto/runtime.hpp" #endif -#include "vm_version_x86.hpp" #define __ masm-> diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -45,9 +45,9 @@ #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" +#include "runtime/vm_version.hpp" #include "utilities/align.hpp" #include "utilities/formatBuffer.hpp" -#include "vm_version_x86.hpp" #include "vmreg_x86.inline.hpp" #ifdef COMPILER1 #include "c1/c1_Runtime1.hpp" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/x86/vm_version_ext_x86.hpp --- a/src/hotspot/cpu/x86/vm_version_ext_x86.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/x86/vm_version_ext_x86.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_X86_VM_VERSION_EXT_X86_HPP #define CPU_X86_VM_VERSION_EXT_X86_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_x86.hpp" class VM_Version_Ext : public VM_Version { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/x86/vm_version_x86.cpp --- a/src/hotspot/cpu/x86/vm_version_x86.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/x86/vm_version_x86.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -32,8 +32,8 @@ #include "runtime/java.hpp" #include "runtime/os.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/vm_version.hpp" #include "utilities/virtualizationSupport.hpp" -#include "vm_version_x86.hpp" #include OS_HEADER_INLINE(os) diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/x86/vm_version_x86.hpp --- a/src/hotspot/cpu/x86/vm_version_x86.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/x86/vm_version_x86.hpp 
Mon Nov 18 12:40:06 2019 -0500 @@ -26,8 +26,8 @@ #define CPU_X86_VM_VERSION_X86_HPP #include "memory/universe.hpp" +#include "runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" class VM_Version : public Abstract_VM_Version { friend class VMStructs; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/zero/register_zero.hpp --- a/src/hotspot/cpu/zero/register_zero.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/zero/register_zero.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -27,7 +27,7 @@ #define CPU_ZERO_REGISTER_ZERO_HPP #include "asm/register.hpp" -#include "vm_version_zero.hpp" +#include "runtime/vm_version.hpp" class VMRegImpl; typedef VMRegImpl* VMReg; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/zero/vm_version_ext_zero.hpp --- a/src/hotspot/cpu/zero/vm_version_ext_zero.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/zero/vm_version_ext_zero.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,8 +25,8 @@ #ifndef CPU_ZERO_VM_VERSION_EXT_ZERO_HPP #define CPU_ZERO_VM_VERSION_EXT_ZERO_HPP +#include "runtime/vm_version.hpp" #include "utilities/macros.hpp" -#include "vm_version_zero.hpp" class VM_Version_Ext : public VM_Version { private: diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/zero/vm_version_zero.cpp --- a/src/hotspot/cpu/zero/vm_version_zero.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/zero/vm_version_zero.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,7 +28,7 @@ #include "memory/resourceArea.hpp" #include "runtime/java.hpp" #include "runtime/stubCodeGenerator.hpp" -#include "vm_version_zero.hpp" +#include "runtime/vm_version.hpp" void VM_Version::initialize() { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/cpu/zero/vm_version_zero.hpp --- a/src/hotspot/cpu/zero/vm_version_zero.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/cpu/zero/vm_version_zero.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,8 +26,8 @@ #ifndef CPU_ZERO_VM_VERSION_ZERO_HPP #define CPU_ZERO_VM_VERSION_ZERO_HPP +#include "runtime/abstract_vm_version.hpp" #include "runtime/globals_extension.hpp" -#include "runtime/vm_version.hpp" class VM_Version : public Abstract_VM_Version { public: diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os/aix/os_aix.cpp --- a/src/hotspot/os/aix/os_aix.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os/aix/os_aix.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -2341,6 +2341,10 @@ return 0; } +int os::numa_get_group_id_for_address(const void* address) { + return 0; +} + bool os::get_page_info(char *start, page_info* info) { return false; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os/bsd/os_perf_bsd.cpp --- a/src/hotspot/os/bsd/os_perf_bsd.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os/bsd/os_perf_bsd.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,7 +26,7 @@ #include "memory/resourceArea.hpp" #include "runtime/os.hpp" #include "runtime/os_perf.hpp" -#include "vm_version_ext_x86.hpp" +#include CPU_HEADER(vm_version_ext) #ifdef __APPLE__ #import diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os/posix/os_posix.cpp --- a/src/hotspot/os/posix/os_posix.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os/posix/os_posix.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -2075,10 +2075,12 @@ // the ThreadBlockInVM() CTOR and DTOR may grab 
Threads_lock. ThreadBlockInVM tbivm(jt); + // Can't access interrupt state now that we are _thread_blocked. If we've + // been interrupted since we checked above then _counter will be > 0. + // Don't wait if cannot get lock since interference arises from - // unparking. Also re-check interrupt before trying wait. - if (jt->is_interrupted(false) || - pthread_mutex_trylock(_mutex) != 0) { + // unparking. + if (pthread_mutex_trylock(_mutex) != 0) { return; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os/solaris/os_solaris.cpp --- a/src/hotspot/os/solaris/os_solaris.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os/solaris/os_solaris.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -4925,10 +4925,12 @@ // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. ThreadBlockInVM tbivm(jt); + // Can't access interrupt state now that we are _thread_blocked. If we've + // been interrupted since we checked above then _counter will be > 0. + // Don't wait if cannot get lock since interference arises from - // unblocking. Also. check interrupt before trying wait - if (jt->is_interrupted(false) || - os::Solaris::mutex_trylock(_mutex) != 0) { + // unblocking. + if (os::Solaris::mutex_trylock(_mutex) != 0) { return; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os/windows/os_perf_windows.cpp --- a/src/hotspot/os/windows/os_perf_windows.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os/windows/os_perf_windows.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -31,7 +31,7 @@ #include "runtime/os_perf.hpp" #include "runtime/os.hpp" #include "utilities/macros.hpp" -#include "vm_version_ext_x86.hpp" +#include CPU_HEADER(vm_version_ext) #include #include #include diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp --- a/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/bsd_x86/vm_version_bsd_x86.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,4 +24,4 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_x86.hpp" +#include "runtime/vm_version.hpp" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/bsd_zero/vm_version_bsd_zero.cpp --- a/src/hotspot/os_cpu/bsd_zero/vm_version_bsd_zero.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/bsd_zero/vm_version_bsd_zero.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -25,6 +25,6 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_zero.hpp" +#include "runtime/vm_version.hpp" // This file is intentionally empty diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp --- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. 
+ * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,9 +26,11 @@ #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP -#include "vm_version_aarch64.hpp" +#include "runtime/vm_version.hpp" // Implementation of class atomic +// Note that memory_order_conservative requires a full barrier after atomic stores. +// See https://patchwork.kernel.org/patch/3575821/ #define FULL_MEM_BARRIER __sync_synchronize() #define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); @@ -52,7 +54,7 @@ T volatile* dest, atomic_memory_order order) const { STATIC_ASSERT(byte_size == sizeof(T)); - T res = __sync_lock_test_and_set(dest, exchange_value); + T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE); FULL_MEM_BARRIER; return res; } @@ -70,7 +72,12 @@ __ATOMIC_RELAXED, __ATOMIC_RELAXED); return value; } else { - return __sync_val_compare_and_swap(dest, compare_value, exchange_value); + T value = compare_value; + FULL_MEM_BARRIER; + __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false, + __ATOMIC_RELAXED, __ATOMIC_RELAXED); + FULL_MEM_BARRIER; + return value; } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp --- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ // Included in orderAccess.hpp header file. -#include "vm_version_aarch64.hpp" +#include "runtime/vm_version.hpp" // Implementation of class OrderAccess. diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp --- a/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_aarch64/vm_version_linux_aarch64.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,5 +25,5 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_aarch64.hpp" +#include "runtime/vm_version.hpp" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp --- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,7 +26,7 @@ #define OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP #include "runtime/os.hpp" -#include "vm_version_arm.hpp" +#include "runtime/vm_version.hpp" // Implementation of class atomic diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp --- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -28,7 +28,7 @@ // Included in orderAccess.hpp header file. #include "runtime/os.hpp" -#include "vm_version_arm.hpp" +#include "runtime/vm_version.hpp" // Implementation of class OrderAccess. // - we define the high level barriers below and use the general diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_arm/vm_version_linux_arm_32.cpp --- a/src/hotspot/os_cpu/linux_arm/vm_version_linux_arm_32.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_arm/vm_version_linux_arm_32.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_arm.hpp" +#include "runtime/vm_version.hpp" # include diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp --- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2018 SAP SE. All rights reserved. + * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "runtime/atomic.hpp" #include "runtime/os.hpp" -#include "vm_version_s390.hpp" +#include "runtime/vm_version.hpp" // Note that the compare-and-swap instructions on System z perform // a serialization function before the storage operand is fetched diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp --- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. + * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ // Included in orderAccess.hpp header file. 
-#include "vm_version_s390.hpp" +#include "runtime/vm_version.hpp" // Implementation of class OrderAccess. diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_sparc/vm_version_linux_sparc.cpp --- a/src/hotspot/os_cpu/linux_sparc/vm_version_linux_sparc.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_sparc/vm_version_linux_sparc.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #include "logging/log.hpp" #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_sparc.hpp" +#include "runtime/vm_version.hpp" #define CPUINFO_LINE_SIZE 1024 diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_x86/vm_version_linux_x86.cpp --- a/src/hotspot/os_cpu/linux_x86/vm_version_linux_x86.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_x86/vm_version_linux_x86.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,5 +24,5 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_x86.hpp" +#include "runtime/vm_version.hpp" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/linux_zero/vm_version_linux_zero.cpp --- a/src/hotspot/os_cpu/linux_zero/vm_version_linux_zero.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/linux_zero/vm_version_linux_zero.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * Copyright 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -25,6 +25,6 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_zero.hpp" +#include "runtime/vm_version.hpp" // This file is intentionally empty diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp --- a/src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/solaris_sparc/vm_version_solaris_sparc.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" #include "runtime/os.hpp" -#include "vm_version_sparc.hpp" +#include "runtime/vm_version.hpp" #include #include diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/solaris_x86/vm_version_solaris_x86.cpp --- a/src/hotspot/os_cpu/solaris_x86/vm_version_solaris_x86.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/solaris_x86/vm_version_solaris_x86.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,5 +24,5 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_x86.hpp" +#include "runtime/vm_version.hpp" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/os_cpu/windows_x86/vm_version_windows_x86.cpp --- a/src/hotspot/os_cpu/windows_x86/vm_version_windows_x86.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/os_cpu/windows_x86/vm_version_windows_x86.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,5 +24,5 @@ #include "precompiled.hpp" #include "runtime/os.hpp" -#include "vm_version_x86.hpp" +#include "runtime/vm_version.hpp" diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/classfile/javaClasses.cpp --- a/src/hotspot/share/classfile/javaClasses.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/classfile/javaClasses.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1077,7 +1077,7 @@ Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset)); assert(ak != NULL || t == T_VOID, "should not be NULL"); if (ak != NULL) { - Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak); + Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak, true); archived_m->metadata_field_put(_array_klass_offset, reloc_ak); } @@ -1222,7 +1222,7 @@ // The archived mirror's field at _klass_offset is still pointing to the original // klass. Updated the field in the archived mirror to point to the relocated // klass in the archive. - Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror)); + Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror), true); log_debug(cds, heap, mirror)( "Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT, p2i(as_Klass(mirror)), p2i(reloc_k)); @@ -1232,7 +1232,7 @@ // higher array klass if exists. Relocate the pointer. 
Klass *arr = array_klass_acquire(mirror); if (arr != NULL) { - Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr); + Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr, true); log_debug(cds, heap, mirror)( "Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT, p2i(arr), p2i(reloc_arr)); @@ -1241,6 +1241,33 @@ return archived_mirror; } +void java_lang_Class::update_archived_primitive_mirror_native_pointers(oop archived_mirror) { + if (MetaspaceShared::relocation_delta() != 0) { + assert(archived_mirror->metadata_field(_klass_offset) == NULL, "must be for primitive class"); + + Klass* ak = ((Klass*)archived_mirror->metadata_field(_array_klass_offset)); + if (ak != NULL) { + archived_mirror->metadata_field_put(_array_klass_offset, + (Klass*)(address(ak) + MetaspaceShared::relocation_delta())); + } + } +} + +void java_lang_Class::update_archived_mirror_native_pointers(oop archived_mirror) { + if (MetaspaceShared::relocation_delta() != 0) { + Klass* k = ((Klass*)archived_mirror->metadata_field(_klass_offset)); + archived_mirror->metadata_field_put(_klass_offset, + (Klass*)(address(k) + MetaspaceShared::relocation_delta())); + + Klass* ak = ((Klass*)archived_mirror->metadata_field(_array_klass_offset)); + if (ak != NULL) { + archived_mirror->metadata_field_put(_array_klass_offset, + (Klass*)(address(ak) + MetaspaceShared::relocation_delta())); + } + } +} + + // Returns true if the mirror is updated, false if no archived mirror // data is present. After the archived mirror object is restored, the // shared klass' _has_raw_archived_mirror flag is cleared. @@ -1256,15 +1283,15 @@ } oop m = HeapShared::materialize_archived_object(k->archived_java_mirror_raw_narrow()); - if (m == NULL) { return false; } + // mirror is archived, restore log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m)); - - // mirror is archived, restore assert(HeapShared::is_archived_object(m), "must be archived mirror object"); + update_archived_mirror_native_pointers(m); + assert(as_Klass(m) == k, "must be"); Handle mirror(THREAD, m); if (!k->is_array_klass()) { @@ -1681,10 +1708,20 @@ } bool java_lang_Thread::interrupted(oop java_thread) { + // Make sure the caller can safely access oops. + assert(Thread::current()->is_VM_thread() || + (JavaThread::current()->thread_state() != _thread_blocked && + JavaThread::current()->thread_state() != _thread_in_native), + "Unsafe access to oop"); return java_thread->bool_field_volatile(_interrupted_offset); } void java_lang_Thread::set_interrupted(oop java_thread, bool val) { + // Make sure the caller can safely access oops. + assert(Thread::current()->is_VM_thread() || + (JavaThread::current()->thread_state() != _thread_blocked && + JavaThread::current()->thread_state() != _thread_in_native), + "Unsafe access to oop"); java_thread->bool_field_put_volatile(_interrupted_offset, val); } @@ -4649,6 +4686,28 @@ } #endif +#if INCLUDE_CDS_JAVA_HEAP +bool JavaClasses::is_supported_for_archiving(oop obj) { + Klass* klass = obj->klass(); + + if (klass == SystemDictionary::ClassLoader_klass() || // ClassLoader::loader_data is malloc'ed. + klass == SystemDictionary::Module_klass() || // Module::module_entry is malloc'ed + // The next 3 classes are used to implement java.lang.invoke, and are not used directly in + // regular Java code. The implementation of java.lang.invoke uses generated anonymoys classes + // (e.g., as referenced by ResolvedMethodName::vmholder) that are not yet supported by CDS. 
+ // So for now we cannot not support these classes for archiving. + // + // These objects typically are not referenced by static fields, but rather by resolved + // constant pool entries, so excluding them shouldn't affect the archiving of static fields. + klass == SystemDictionary::ResolvedMethodName_klass() || + klass == SystemDictionary::MemberName_klass() || + klass == SystemDictionary::Context_klass()) { + return false; + } + + return true; +} +#endif #ifndef PRODUCT diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/classfile/javaClasses.hpp --- a/src/hotspot/share/classfile/javaClasses.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/classfile/javaClasses.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -281,6 +281,8 @@ Handle protection_domain, TRAPS); static void fixup_mirror(Klass* k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); + static void update_archived_primitive_mirror_native_pointers(oop archived_mirror) NOT_CDS_JAVA_HEAP_RETURN; + static void update_archived_mirror_native_pointers(oop archived_mirror) NOT_CDS_JAVA_HEAP_RETURN; // Archiving static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; @@ -1662,6 +1664,7 @@ static void check_offsets() PRODUCT_RETURN; static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN; static InjectedField* get_injected(Symbol* class_name, int* field_count); + static bool is_supported_for_archiving(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(false); }; #undef DECLARE_INJECTED_FIELD_ENUM diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/classfile/systemDictionaryShared.cpp --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -38,6 +38,7 @@ #include "classfile/vmSymbols.hpp" #include "logging/log.hpp" #include "memory/allocation.hpp" +#include "memory/archiveUtils.hpp" #include "memory/filemap.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceClosure.hpp" @@ -294,6 +295,7 @@ if (DynamicDumpSharedSpaces) { _klass = DynamicArchive::original_to_target(info._klass); } + ArchivePtrMarker::mark_pointer(&_klass); } bool matches(int clsfile_size, int clsfile_crc32) const { @@ -337,6 +339,8 @@ } else { *info_pointer_addr(klass) = record; } + + ArchivePtrMarker::mark_pointer(info_pointer_addr(klass)); } // Used by RunTimeSharedDictionary to implement OffsetCompactHashtable::EQUALS @@ -1354,7 +1358,7 @@ if (DynamicDumpSharedSpaces) { name = DynamicArchive::original_to_target(name); } - hash = primitive_hash(name); + hash = SystemDictionaryShared::hash_for_shared_dictionary(name); u4 delta; if (DynamicDumpSharedSpaces) { delta = MetaspaceShared::object_delta_u4(DynamicArchive::buffer_to_target(record)); @@ -1413,7 +1417,7 @@ return NULL; } - unsigned int hash = primitive_hash(name); + unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(name); const RunTimeSharedClassInfo* record = NULL; if (!MetaspaceShared::is_shared_dynamic(name)) { // The names of all shared classes in the static dict must also be in the diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/classfile/systemDictionaryShared.hpp --- a/src/hotspot/share/classfile/systemDictionaryShared.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -323,6 +323,12 @@ }; #endif + template + static unsigned int hash_for_shared_dictionary(T* ptr) { + assert(ptr > (T*)SharedBaseAddress, 
"must be"); + address p = address(ptr) - SharedBaseAddress; + return primitive_hash
(p); + } }; #endif // SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/compiler/compileBroker.cpp --- a/src/hotspot/share/compiler/compileBroker.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/compiler/compileBroker.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -596,7 +596,7 @@ // CompileBroker::compilation_init // // Initialize the Compilation object -void CompileBroker::compilation_init_phase1(TRAPS) { +void CompileBroker::compilation_init_phase1(Thread* THREAD) { // No need to initialize compilation system if we do not use it. if (!UseCompiler) { return; @@ -647,6 +647,7 @@ // totalTime performance counter is always created as it is required // by the implementation of java.lang.management.CompilationMBean. { + // Ensure OOM leads to vm_exit_during_initialization. EXCEPTION_MARK; _perf_total_compilation = PerfDataManager::create_counter(JAVA_CI, "totalTime", @@ -761,17 +762,17 @@ } -JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, TRAPS) { - JavaThread* thread = NULL; +JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, Thread* THREAD) { + JavaThread* new_thread = NULL; { MutexLocker mu(Threads_lock, THREAD); if (comp != NULL) { if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) { CompilerCounters* counters = new CompilerCounters(); - thread = new CompilerThread(queue, counters); + new_thread = new CompilerThread(queue, counters); } } else { - thread = new CodeCacheSweeperThread(); + new_thread = new CodeCacheSweeperThread(); } // At this point the new CompilerThread data-races with this startup // thread (which I believe is the primoridal thread and NOT the VM @@ -786,9 +787,9 @@ // exceptions anyway, check and abort if this fails. But first release the // lock. - if (thread != NULL && thread->osthread() != NULL) { + if (new_thread != NULL && new_thread->osthread() != NULL) { - java_lang_Thread::set_thread(JNIHandles::resolve_non_null(thread_handle), thread); + java_lang_Thread::set_thread(JNIHandles::resolve_non_null(thread_handle), new_thread); // Note that this only sets the JavaThread _priority field, which by // definition is limited to Java priorities and not OS priorities. @@ -809,24 +810,24 @@ native_prio = os::java_to_os_priority[NearMaxPriority]; } } - os::set_native_priority(thread, native_prio); + os::set_native_priority(new_thread, native_prio); java_lang_Thread::set_daemon(JNIHandles::resolve_non_null(thread_handle)); - thread->set_threadObj(JNIHandles::resolve_non_null(thread_handle)); + new_thread->set_threadObj(JNIHandles::resolve_non_null(thread_handle)); if (comp != NULL) { - thread->as_CompilerThread()->set_compiler(comp); + new_thread->as_CompilerThread()->set_compiler(comp); } - Threads::add(thread); - Thread::start(thread); + Threads::add(new_thread); + Thread::start(new_thread); } } // First release lock before aborting VM. 
- if (thread == NULL || thread->osthread() == NULL) { + if (new_thread == NULL || new_thread->osthread() == NULL) { if (UseDynamicNumberOfCompilerThreads && comp != NULL && comp->num_compiler_threads() > 0) { - if (thread != NULL) { - thread->smr_delete(); + if (new_thread != NULL) { + new_thread->smr_delete(); } return NULL; } @@ -837,11 +838,12 @@ // Let go of Threads_lock before yielding os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS) - return thread; + return new_thread; } void CompileBroker::init_compiler_sweeper_threads() { + // Ensure any exceptions lead to vm_exit_during_initialization. EXCEPTION_MARK; #if !defined(ZERO) assert(_c2_count > 0 || _c1_count > 0, "No compilers?"); @@ -875,7 +877,7 @@ _compiler2_logs[i] = NULL; if (!UseDynamicNumberOfCompilerThreads || i == 0) { - JavaThread *ct = make_thread(thread_handle, _c2_compile_queue, _compilers[1], CHECK); + JavaThread *ct = make_thread(thread_handle, _c2_compile_queue, _compilers[1], THREAD); assert(ct != NULL, "should have been handled for initial thread"); _compilers[1]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) { @@ -895,7 +897,7 @@ _compiler1_logs[i] = NULL; if (!UseDynamicNumberOfCompilerThreads || i == 0) { - JavaThread *ct = make_thread(thread_handle, _c1_compile_queue, _compilers[0], CHECK); + JavaThread *ct = make_thread(thread_handle, _c1_compile_queue, _compilers[0], THREAD); assert(ct != NULL, "should have been handled for initial thread"); _compilers[0]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) { @@ -914,12 +916,11 @@ // Initialize the sweeper thread Handle thread_oop = create_thread_oop("Sweeper thread", CHECK); jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop()); - make_thread(thread_handle, NULL, NULL, CHECK); + make_thread(thread_handle, NULL, NULL, THREAD); } } -void CompileBroker::possibly_add_compiler_threads() { - EXCEPTION_MARK; +void CompileBroker::possibly_add_compiler_threads(Thread* THREAD) { julong available_memory = os::available_memory(); // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All). 
@@ -970,7 +971,7 @@ _compiler2_objects[i] = thread_handle; } #endif - JavaThread *ct = make_thread(compiler2_object(i), _c2_compile_queue, _compilers[1], CHECK); + JavaThread *ct = make_thread(compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD); if (ct == NULL) break; _compilers[1]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) { @@ -990,7 +991,7 @@ (int)(available_cc_p / (128*K))); for (int i = old_c1_count; i < new_c1_count; i++) { - JavaThread *ct = make_thread(compiler1_object(i), _c1_compile_queue, _compilers[0], CHECK); + JavaThread *ct = make_thread(compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD); if (ct == NULL) break; _compilers[0]->set_num_compiler_threads(i + 1); if (TraceCompilerThreads) { @@ -1511,14 +1512,6 @@ } // ------------------------------------------------------------------ -// CompileBroker::preload_classes -void CompileBroker::preload_classes(const methodHandle& method, TRAPS) { - // Move this code over from c1_Compiler.cpp - ShouldNotReachHere(); -} - - -// ------------------------------------------------------------------ // CompileBroker::create_compile_task // // Create a CompileTask object representing the current request for @@ -1865,7 +1858,8 @@ } if (UseDynamicNumberOfCompilerThreads) { - possibly_add_compiler_threads(); + possibly_add_compiler_threads(thread); + assert(!thread->has_pending_exception(), "should have been handled"); } } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/compiler/compileBroker.hpp --- a/src/hotspot/share/compiler/compileBroker.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/compiler/compileBroker.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -226,11 +226,10 @@ static volatile int _print_compilation_warning; static Handle create_thread_oop(const char* name, TRAPS); - static JavaThread* make_thread(jobject thread_oop, CompileQueue* queue, AbstractCompiler* comp, TRAPS); + static JavaThread* make_thread(jobject thread_oop, CompileQueue* queue, AbstractCompiler* comp, Thread* THREAD); static void init_compiler_sweeper_threads(); - static void possibly_add_compiler_threads(); + static void possibly_add_compiler_threads(Thread* THREAD); static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded); - static void preload_classes (const methodHandle& method, TRAPS); static CompileTask* create_compile_task(CompileQueue* queue, int compile_id, @@ -292,7 +291,7 @@ CompileQueue *q = compile_queue(comp_level); return q != NULL ? 
q->size() : 0; } - static void compilation_init_phase1(TRAPS); + static void compilation_init_phase1(Thread* THREAD); static void compilation_init_phase2(); static void init_compiler_thread_log(); static nmethod* compile_method(const methodHandle& method, diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -2001,7 +2001,6 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { switch (cause) { - case GCCause::_gc_locker: return GCLockerInvokesConcurrent; case GCCause::_g1_humongous_allocation: return true; case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent; default: return is_user_requested_concurrent_full_gc(cause); @@ -2009,7 +2008,7 @@ } bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) { - if(policy()->force_upgrade_to_full()) { + if (policy()->force_upgrade_to_full()) { return true; } else if (should_do_concurrent_full_gc(_gc_cause)) { return false; @@ -2056,7 +2055,7 @@ } void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) { - MonitorLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); + MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag); // We assume that if concurrent == true, then the caller is a // concurrent thread that was joined the Suspendible Thread @@ -2096,91 +2095,210 @@ _cm_thread->set_idle(); } - // This notify_all() will ensure that a thread that called - // System.gc() with (with ExplicitGCInvokesConcurrent set or not) - // and it's waiting for a full GC to finish will be woken up. It is - // waiting in VM_G1CollectForAllocation::doit_epilogue(). - FullGCCount_lock->notify_all(); + // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent) + // for a full GC to finish that their wait is over. + ml.notify_all(); } void G1CollectedHeap::collect(GCCause::Cause cause) { - try_collect(cause, true); + try_collect(cause); +} + +// Return true if (x < y) with allowance for wraparound. +static bool gc_counter_less_than(uint x, uint y) { + return (x - y) > (UINT_MAX/2); } -bool G1CollectedHeap::try_collect(GCCause::Cause cause, bool retry_on_gc_failure) { +// LOG_COLLECT_CONCURRENTLY(cause, msg, args...) +// Macro so msg printing is format-checked. +#define LOG_COLLECT_CONCURRENTLY(cause, ...) \ + do { \ + LogTarget(Trace, gc) LOG_COLLECT_CONCURRENTLY_lt; \ + if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) { \ + ResourceMark rm; /* For thread name. 
*/ \ + LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \ + LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \ + Thread::current()->name(), \ + GCCause::to_string(cause)); \ + LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__); \ + } \ + } while (0) + +#define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \ + LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result)) + +bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause, + uint gc_counter, + uint old_marking_started_before) { assert_heap_not_locked(); - - bool gc_succeeded; - bool should_retry_gc; - - do { - should_retry_gc = false; - - uint gc_count_before; - uint old_marking_count_before; - uint full_gc_count_before; - + assert(should_do_concurrent_full_gc(cause), + "Non-concurrent cause %s", GCCause::to_string(cause)); + + for (uint i = 1; true; ++i) { + // Try to schedule an initial-mark evacuation pause that will + // start a concurrent cycle. + LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i); + VM_G1TryInitiateConcMark op(gc_counter, + cause, + policy()->max_pause_time_ms()); + VMThread::execute(&op); + + // Request is trivially finished. + if (cause == GCCause::_g1_periodic_collection) { + LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, op.gc_succeeded()); + return op.gc_succeeded(); + } + + // Lock to get consistent set of values. + uint old_marking_started_after; + uint old_marking_completed_after; { MutexLocker ml(Heap_lock); - - // Read the GC count while holding the Heap_lock - gc_count_before = total_collections(); - full_gc_count_before = total_full_collections(); - old_marking_count_before = _old_marking_cycles_started; + // Update gc_counter for retrying VMOp if needed. Captured here to be + // consistent with the values we use below for termination tests. If + // a retry is needed after a possible wait, and another collection + // occurs in the meantime, it will cause our retry to be skipped and + // we'll recheck for termination with updated conditions from that + // more recent collection. That's what we want, rather than having + // our retry possibly perform an unnecessary collection. + gc_counter = total_collections(); + old_marking_started_after = _old_marking_cycles_started; + old_marking_completed_after = _old_marking_cycles_completed; } - if (should_do_concurrent_full_gc(cause)) { - // Schedule an initial-mark evacuation pause that will start a - // concurrent cycle. We're setting word_size to 0 which means that - // we are not requesting a post-GC allocation. - VM_G1CollectForAllocation op(0, /* word_size */ - gc_count_before, - cause, - true, /* should_initiate_conc_mark */ - policy()->max_pause_time_ms()); - VMThread::execute(&op); - gc_succeeded = op.gc_succeeded(); - if (!gc_succeeded && retry_on_gc_failure) { - if (old_marking_count_before == _old_marking_cycles_started) { - should_retry_gc = op.should_retry_gc(); - } else { - // A Full GC happened while we were trying to schedule the - // concurrent cycle. No point in starting a new cycle given - // that the whole heap was collected anyway. + if (!GCCause::is_user_requested_gc(cause)) { + // For an "automatic" (not user-requested) collection, we just need to + // ensure that progress is made. + // + // Request is finished if any of + // (1) the VMOp successfully performed a GC, + // (2) a concurrent cycle was already in progress, + // (3) a new cycle was started (by this thread or some other), or + // (4) a Full GC was performed. 
+ // Cases (3) and (4) are detected together by a change to + // _old_marking_cycles_started. + // + // Note that (1) does not imply (3). If we're still in the mixed + // phase of an earlier concurrent collection, the request to make the + // collection an initial-mark won't be honored. If we don't check for + // both conditions we'll spin doing back-to-back collections. + if (op.gc_succeeded() || + op.cycle_already_in_progress() || + (old_marking_started_before != old_marking_started_after)) { + LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true); + return true; + } + } else { // User-requested GC. + // For a user-requested collection, we want to ensure that a complete + // full collection has been performed before returning, but without + // waiting for more than needed. + + // For user-requested GCs (unlike non-UR), a successful VMOp implies a + // new cycle was started. That's good, because it's not clear what we + // should do otherwise. Trying again just does back to back GCs. + // Can't wait for someone else to start a cycle. And returning fails + // to meet the goal of ensuring a full collection was performed. + assert(!op.gc_succeeded() || + (old_marking_started_before != old_marking_started_after), + "invariant: succeeded %s, started before %u, started after %u", + BOOL_TO_STR(op.gc_succeeded()), + old_marking_started_before, old_marking_started_after); + + // Request is finished if a full collection (concurrent or stw) + // was started after this request and has completed, e.g. + // started_before < completed_after. + if (gc_counter_less_than(old_marking_started_before, + old_marking_completed_after)) { + LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true); + return true; + } + + if (old_marking_started_after != old_marking_completed_after) { + // If there is an in-progress cycle (possibly started by us), then + // wait for that cycle to complete, e.g. + // while completed_now < started_after. + LOG_COLLECT_CONCURRENTLY(cause, "wait"); + MonitorLocker ml(G1OldGCCount_lock); + while (gc_counter_less_than(_old_marking_cycles_completed, + old_marking_started_after)) { + ml.wait(); } - - if (should_retry_gc && GCLocker::is_active_and_needs_gc()) { - GCLocker::stall_until_clear(); + // Request is finished if the collection we just waited for was + // started after this request. + if (old_marking_started_before != old_marking_started_after) { + LOG_COLLECT_CONCURRENTLY(cause, "complete after wait"); + return true; } } - } else if (GCLocker::should_discard(cause, gc_count_before)) { - // Return false to be consistent with VMOp failure due to - // another collection slipping in after our gc_count but before - // our request is processed. _gc_locker collections upgraded by - // GCLockerInvokesConcurrent are handled above and never discarded. - return false; - } else { - if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc - DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { - - // Schedule a standard evacuation pause. We're setting word_size - // to 0 which means that we are not requesting a post-GC allocation. - VM_G1CollectForAllocation op(0, /* word_size */ - gc_count_before, - cause, - false, /* should_initiate_conc_mark */ - policy()->max_pause_time_ms()); - VMThread::execute(&op); - gc_succeeded = op.gc_succeeded(); - } else { - // Schedule a Full GC. 
- VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); - VMThread::execute(&op); - gc_succeeded = op.gc_succeeded(); + + // If VMOp was successful then it started a new cycle that the above + // wait &etc should have recognized as finishing this request. This + // differs from a non-user-request, where gc_succeeded does not imply + // a new cycle was started. + assert(!op.gc_succeeded(), "invariant"); + + // If VMOp failed because a cycle was already in progress, it is now + // complete. But it didn't finish this user-requested GC, so try + // again. + if (op.cycle_already_in_progress()) { + LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress"); + continue; } } - } while (should_retry_gc); - return gc_succeeded; + + // Collection failed and should be retried. + assert(op.transient_failure(), "invariant"); + + // If GCLocker is active, wait until clear before retrying. + if (GCLocker::is_active_and_needs_gc()) { + LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall"); + GCLocker::stall_until_clear(); + } + + LOG_COLLECT_CONCURRENTLY(cause, "retry"); + } +} + +bool G1CollectedHeap::try_collect(GCCause::Cause cause) { + assert_heap_not_locked(); + + // Lock to get consistent set of values. + uint gc_count_before; + uint full_gc_count_before; + uint old_marking_started_before; + { + MutexLocker ml(Heap_lock); + gc_count_before = total_collections(); + full_gc_count_before = total_full_collections(); + old_marking_started_before = _old_marking_cycles_started; + } + + if (should_do_concurrent_full_gc(cause)) { + return try_collect_concurrently(cause, + gc_count_before, + old_marking_started_before); + } else if (GCLocker::should_discard(cause, gc_count_before)) { + // Indicate failure to be consistent with VMOp failure due to + // another collection slipping in after our gc_count but before + // our request is processed. + return false; + } else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc + DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { + + // Schedule a standard evacuation pause. We're setting word_size + // to 0 which means that we are not requesting a post-GC allocation. + VM_G1CollectForAllocation op(0, /* word_size */ + gc_count_before, + cause, + policy()->max_pause_time_ms()); + VMThread::execute(&op); + return op.gc_succeeded(); + } else { + // Schedule a Full GC. + VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause); + VMThread::execute(&op); + return op.gc_succeeded(); + } } bool G1CollectedHeap::is_in(const void* p) const { @@ -2611,7 +2729,6 @@ VM_G1CollectForAllocation op(word_size, gc_count_before, gc_cause, - false, /* should_initiate_conc_mark */ policy()->max_pause_time_ms()); VMThread::execute(&op); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1CollectedHeap.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -133,6 +133,7 @@ friend class VM_CollectForMetadataAllocation; friend class VM_G1CollectForAllocation; friend class VM_G1CollectFull; + friend class VM_G1TryInitiateConcMark; friend class VMStructs; friend class MutatorAllocRegion; friend class G1FullCollector; @@ -259,16 +260,21 @@ G1HRPrinter _hr_printer; - // It decides whether an explicit GC should start a concurrent cycle - // instead of doing a STW GC. 
Currently, a concurrent cycle is - // explicitly started if: - // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or - // (b) cause == _g1_humongous_allocation - // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent. - // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent. - // (e) cause == _wb_conc_mark + // Return true if an explicit GC should start a concurrent cycle instead + // of doing a STW full GC. A concurrent cycle should be started if: + // (a) cause == _g1_humongous_allocation, + // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, + // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent, + // (d) cause == _wb_conc_mark, + // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent. bool should_do_concurrent_full_gc(GCCause::Cause cause); + // Attempt to start a concurrent cycle with the indicated cause. + // precondition: should_do_concurrent_full_gc(cause) + bool try_collect_concurrently(GCCause::Cause cause, + uint gc_counter, + uint old_marking_started_before); + // Return true if should upgrade to full gc after an incremental one. bool should_upgrade_to_full_gc(GCCause::Cause cause); @@ -630,7 +636,7 @@ // Full GC). If concurrent is true, the caller is the outer caller // in this nesting (i.e., the concurrent cycle). Further nesting is // not currently supported. The end of this call also notifies - // the FullGCCount_lock in case a Java thread is waiting for a full + // the G1OldGCCount_lock in case a Java thread is waiting for a full // GC to happen (e.g., it called System.gc() with // +ExplicitGCInvokesConcurrent). void increment_old_marking_cycles_completed(bool concurrent); @@ -1088,10 +1094,9 @@ // "CollectedHeap" supports. virtual void collect(GCCause::Cause cause); - // Perform a collection of the heap with the given cause; if the VM operation - // fails to execute for any reason, retry only if retry_on_gc_failure is set. + // Perform a collection of the heap with the given cause. // Returns whether this collection actually executed. - bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure); + bool try_collect(GCCause::Cause cause); // True iff an evacuation has failed in the most-recent collection. bool evacuation_failed() { return _evacuation_failed; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -393,7 +393,7 @@ } // Update the number of full collections that have been - // completed. This will also notify the FullGCCount_lock in case a + // completed. This will also notify the G1OldGCCount_lock in case a // Java thread is waiting for a full GC to happen (e.g., it // called System.gc() with +ExplicitGCInvokesConcurrent). { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1Policy.hpp --- a/src/hotspot/share/gc/g1/g1Policy.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1Policy.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -250,14 +250,6 @@ public: size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; } - size_t total_concurrent_refined_cards() const { - return _total_concurrent_refined_cards; - } - - size_t total_mutator_refined_cards() const { - return _total_mutator_refined_cards; - } - // Calculate the minimum number of old regions we'll add to the CSet // during a mixed GC. 
uint calc_min_old_cset_length() const; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1RemSetSummary.cpp --- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -23,11 +23,11 @@ */ #include "precompiled.hpp" +#include "gc/g1/g1BarrierSet.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1ConcurrentRefine.hpp" #include "gc/g1/g1ConcurrentRefineThread.hpp" #include "gc/g1/g1DirtyCardQueue.hpp" -#include "gc/g1/g1Policy.hpp" #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1RemSetSummary.hpp" #include "gc/g1/g1YoungRemSetSamplingThread.hpp" @@ -36,36 +36,24 @@ #include "memory/allocation.inline.hpp" #include "runtime/thread.inline.hpp" -class GetRSThreadVTimeClosure : public ThreadClosure { -private: - G1RemSetSummary* _summary; - uint _counter; - -public: - GetRSThreadVTimeClosure(G1RemSetSummary * summary) : ThreadClosure(), _summary(summary), _counter(0) { - assert(_summary != NULL, "just checking"); - } - - virtual void do_thread(Thread* t) { - G1ConcurrentRefineThread* crt = (G1ConcurrentRefineThread*) t; - _summary->set_rs_thread_vtime(_counter, crt->vtime_accum()); - _counter++; - } -}; - void G1RemSetSummary::update() { + class CollectData : public ThreadClosure { + G1RemSetSummary* _summary; + uint _counter; + public: + CollectData(G1RemSetSummary * summary) : _summary(summary), _counter(0) {} + virtual void do_thread(Thread* t) { + G1ConcurrentRefineThread* crt = static_cast<G1ConcurrentRefineThread*>(t); + _summary->set_rs_thread_vtime(_counter, crt->vtime_accum()); + _counter++; + _summary->_total_concurrent_refined_cards += crt->total_refined_cards(); + } + } collector(this); G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - const G1Policy* policy = g1h->policy(); - _total_mutator_refined_cards = policy->total_mutator_refined_cards(); - _total_concurrent_refined_cards = policy->total_concurrent_refined_cards(); - + g1h->concurrent_refine()->threads_do(&collector); + _total_mutator_refined_cards = G1BarrierSet::dirty_card_queue_set().total_mutator_refined_cards(); _num_coarsenings = HeapRegionRemSet::n_coarsenings(); - if (_rs_threads_vtimes != NULL) { - GetRSThreadVTimeClosure p(this); - g1h->concurrent_refine()->threads_do(&p); - } set_sampling_thread_vtime(g1h->sampling_thread()->vtime_accum()); } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1RemSetSummary.hpp --- a/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -34,8 +34,6 @@ class G1RemSetSummary { private: - friend class GetRSThreadVTimeClosure; - size_t _total_mutator_refined_cards; size_t _total_concurrent_refined_cards; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1VMOperations.cpp --- a/src/hotspot/share/gc/g1/g1VMOperations.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -40,17 +40,61 @@ _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */); } +VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before, + GCCause::Cause gc_cause, + double target_pause_time_ms) : + VM_GC_Operation(gc_count_before, gc_cause), + _target_pause_time_ms(target_pause_time_ms), + _transient_failure(false), + _cycle_already_in_progress(false), + _gc_succeeded(false) +{} + +bool VM_G1TryInitiateConcMark::doit_prologue() { + bool result =
VM_GC_Operation::doit_prologue(); + // The prologue can fail for a couple of reasons. The first is that another GC + // got scheduled and prevented the scheduling of the initial mark GC. The + // second is that the GC locker may be active and the heap can't be expanded. + // In both cases we want to retry the GC so that the initial mark pause is + // actually scheduled. In the second case, however, we should stall + // until the GC locker is no longer active and then retry the initial mark GC. + if (!result) _transient_failure = true; + return result; +} + +void VM_G1TryInitiateConcMark::doit() { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + + GCCauseSetter x(g1h, _gc_cause); + if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) { + // Failure to force the next GC pause to be an initial mark indicates + // there is already a concurrent marking cycle in progress. Set flag + // to notify the caller and return immediately. + _cycle_already_in_progress = true; + } else if (!g1h->do_collection_pause_at_safepoint(_target_pause_time_ms)) { + // Failure to perform the collection at all occurs because GCLocker is + // active, and we have the bad luck to be the collection request that + // makes a later _gc_locker collection needed. (Else we would have hit + // the GCLocker check in the prologue.) + _transient_failure = true; + } else if (g1h->should_upgrade_to_full_gc(_gc_cause)) { + // GC ran, but we're still in trouble and need a full GC. + log_info(gc, ergo)("Attempting maximally compacting collection"); + _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */ + true /* clear_all_soft_refs */); + guarantee(_gc_succeeded, "Elevated collections during the safepoint must always succeed"); + } else { + _gc_succeeded = true; + } +} + VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause gc_cause, - bool should_initiate_conc_mark, double target_pause_time_ms) : VM_CollectForAllocation(word_size, gc_count_before, gc_cause), _gc_succeeded(false), - _should_initiate_conc_mark(should_initiate_conc_mark), - _should_retry_gc(false), - _target_pause_time_ms(target_pause_time_ms), - _old_marking_cycles_completed_before(0) { + _target_pause_time_ms(target_pause_time_ms) { guarantee(target_pause_time_ms > 0.0, "target_pause_time_ms = %1.6lf should be positive", @@ -58,26 +102,8 @@ _gc_cause = gc_cause; } -bool VM_G1CollectForAllocation::doit_prologue() { - bool res = VM_CollectForAllocation::doit_prologue(); - if (!res) { - if (_should_initiate_conc_mark) { - // The prologue can fail for a couple of reasons. The first is that another GC - // got scheduled and prevented the scheduling of the initial mark GC. The - // second is that the GC locker may be active and the heap can't be expanded. - // In both cases we want to retry the GC so that the initial mark pause is - // actually scheduled. In the second case, however, we should stall until - // until the GC locker is no longer active and then retry the initial mark GC. - _should_retry_gc = true; - } - } - return res; -} - void VM_G1CollectForAllocation::doit() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause), - "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle"); if (_word_size > 0) { // An allocation has been requested. So, try to do that first.
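For orientation, a minimal caller-side sketch (not part of the patch; gc_count_before, cause and pause_target_ms are placeholder names) of how the three outcome accessors of VM_G1TryInitiateConcMark are meant to be combined; G1's real retry loop is G1CollectedHeap::try_collect_concurrently(), shown earlier in this changeset:

  VM_G1TryInitiateConcMark op(gc_count_before, cause, pause_target_ms);
  VMThread::execute(&op);
  if (op.gc_succeeded()) {
    // An initial-mark pause ran (possibly upgraded to a maximally compacting full GC).
  } else if (op.cycle_already_in_progress()) {
    // A concurrent cycle was already running when the pause was requested.
  } else if (op.transient_failure()) {
    // The prologue lost a race or the GC locker interfered; stall if needed and retry.
  }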
@@ -92,44 +118,6 @@ } GCCauseSetter x(g1h, _gc_cause); - if (_should_initiate_conc_mark) { - // It's safer to read old_marking_cycles_completed() here, given - // that noone else will be updating it concurrently. Since we'll - // only need it if we're initiating a marking cycle, no point in - // setting it earlier. - _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed(); - - // At this point we are supposed to start a concurrent cycle. We - // will do so if one is not already in progress. - bool res = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause); - - // The above routine returns true if we were able to force the - // next GC pause to be an initial mark; it returns false if a - // marking cycle is already in progress. - // - // If a marking cycle is already in progress just return and skip the - // pause below - if the reason for requesting this initial mark pause - // was due to a System.gc() then the requesting thread should block in - // doit_epilogue() until the marking cycle is complete. - // - // If this initial mark pause was requested as part of a humongous - // allocation then we know that the marking cycle must just have - // been started by another thread (possibly also allocating a humongous - // object) as there was no active marking cycle when the requesting - // thread checked before calling collect() in - // attempt_allocation_humongous(). Retrying the GC, in this case, - // will cause the requesting thread to spin inside collect() until the - // just started marking cycle is complete - which may be a while. So - // we do NOT retry the GC. - if (!res) { - assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating"); - if (_gc_cause != GCCause::_g1_humongous_allocation) { - _should_retry_gc = true; - } - return; - } - } - // Try a partial collection of some kind. _gc_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms); @@ -138,66 +126,15 @@ // An allocation had been requested. Do it, eventually trying a stronger // kind of GC. _result = g1h->satisfy_failed_allocation(_word_size, &_gc_succeeded); - } else { - bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause); - - if (should_upgrade_to_full) { - // There has been a request to perform a GC to free some space. We have no - // information on how much memory has been asked for. In case there are - // absolutely no regions left to allocate into, do a maximally compacting full GC. - log_info(gc, ergo)("Attempting maximally compacting collection"); - _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */ - true /* clear_all_soft_refs */); - } + } else if (g1h->should_upgrade_to_full_gc(_gc_cause)) { + // There has been a request to perform a GC to free some space. We have no + // information on how much memory has been asked for. In case there are + // absolutely no regions left to allocate into, do a maximally compacting full GC. + log_info(gc, ergo)("Attempting maximally compacting collection"); + _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */ + true /* clear_all_soft_refs */); } guarantee(_gc_succeeded, "Elevated collections during the safepoint must always succeed."); - } else { - assert(_result == NULL, "invariant"); - // The only reason for the pause to not be successful is that, the GC locker is - // active (or has become active since the prologue was executed). In this case - // we should retry the pause after waiting for the GC locker to become inactive. 
- _should_retry_gc = true; - } -} - -void VM_G1CollectForAllocation::doit_epilogue() { - VM_CollectForAllocation::doit_epilogue(); - - // If the pause was initiated by a System.gc() and - // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle - // that just started (or maybe one that was already in progress) to - // finish. - if (GCCause::is_user_requested_gc(_gc_cause) && - _should_initiate_conc_mark) { - assert(ExplicitGCInvokesConcurrent, - "the only way to be here is if ExplicitGCInvokesConcurrent is set"); - - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - // In the doit() method we saved g1h->old_marking_cycles_completed() - // in the _old_marking_cycles_completed_before field. We have to - // wait until we observe that g1h->old_marking_cycles_completed() - // has increased by at least one. This can happen if a) we started - // a cycle and it completes, b) a cycle already in progress - // completes, or c) a Full GC happens. - - // If the condition has already been reached, there's no point in - // actually taking the lock and doing the wait. - if (g1h->old_marking_cycles_completed() <= - _old_marking_cycles_completed_before) { - // The following is largely copied from CMS - - Thread* thr = Thread::current(); - assert(thr->is_Java_thread(), "invariant"); - JavaThread* jt = (JavaThread*)thr; - ThreadToNativeFromVM native(jt); - - MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); - while (g1h->old_marking_cycles_completed() <= - _old_marking_cycles_completed_before) { - ml.wait(); - } - } } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1VMOperations.hpp --- a/src/hotspot/share/gc/g1/g1VMOperations.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1VMOperations.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -45,29 +45,39 @@ _gc_succeeded(false) { } virtual VMOp_Type type() const { return VMOp_G1CollectFull; } virtual void doit(); - bool gc_succeeded() { return _gc_succeeded; } + bool gc_succeeded() const { return _gc_succeeded; } +}; + +class VM_G1TryInitiateConcMark : public VM_GC_Operation { + double _target_pause_time_ms; + bool _transient_failure; + bool _cycle_already_in_progress; + bool _gc_succeeded; + +public: + VM_G1TryInitiateConcMark(uint gc_count_before, + GCCause::Cause gc_cause, + double target_pause_time_ms); + virtual VMOp_Type type() const { return VMOp_G1TryInitiateConcMark; } + virtual bool doit_prologue(); + virtual void doit(); + bool transient_failure() const { return _transient_failure; } + bool cycle_already_in_progress() const { return _cycle_already_in_progress; } + bool gc_succeeded() const { return _gc_succeeded; } }; class VM_G1CollectForAllocation : public VM_CollectForAllocation { bool _gc_succeeded; - - bool _should_initiate_conc_mark; - bool _should_retry_gc; double _target_pause_time_ms; - uint _old_marking_cycles_completed_before; public: VM_G1CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause gc_cause, - bool should_initiate_conc_mark, double target_pause_time_ms); virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; } - virtual bool doit_prologue(); virtual void doit(); - virtual void doit_epilogue(); - bool should_retry_gc() const { return _should_retry_gc; } - bool gc_succeeded() { return _gc_succeeded; } + bool gc_succeeded() const { return _gc_succeeded; } }; // Concurrent G1 stop-the-world operations such as remark and cleanup. 
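The termination and wait checks in try_collect_concurrently() compare cycle counters with gc_counter_less_than(), the wraparound-tolerant helper introduced in g1CollectedHeap.cpp above. A small standalone C++ illustration of the idea (independent of HotSpot; it only holds while the two counters stay less than UINT_MAX/2 apart):

  #include <cassert>
  #include <climits>

  // "x < y" under modular arithmetic: the unsigned difference wraps to a huge
  // value exactly when x is behind y.
  static bool gc_counter_less_than(unsigned x, unsigned y) {
    return (x - y) > (UINT_MAX / 2);
  }

  int main() {
    assert(gc_counter_less_than(5u, 7u));             // ordinary ordering
    assert(!gc_counter_less_than(7u, 5u));
    assert(gc_counter_less_than(UINT_MAX - 1u, 2u));  // still "less" after the counter wraps
    return 0;
  }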
diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp --- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -90,8 +90,7 @@ if ((os::elapsedTime() - _last_periodic_gc_attempt_s) > (G1PeriodicGCInterval / 1000.0)) { log_debug(gc, periodic)("Checking for periodic GC."); if (should_start_periodic_gc()) { - if (!G1CollectedHeap::heap()->try_collect(GCCause::_g1_periodic_collection, - false /* retry_on_vmop_failure */)) { + if (!G1CollectedHeap::heap()->try_collect(GCCause::_g1_periodic_collection)) { log_debug(gc, periodic)("GC request denied. Skipping."); } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/gc/shared/gc_globals.hpp --- a/src/hotspot/share/gc/shared/gc_globals.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/gc/shared/gc_globals.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -228,10 +228,6 @@ "A System.gc() request invokes a concurrent collection; " \ "(effective only when using concurrent collectors)") \ \ - product(bool, GCLockerInvokesConcurrent, false, \ "The exit of a JNI critical section necessitating a scavenge, " \ "also kicks off a background concurrent collection") \ \ product(uintx, GCLockerEdenExpansionPercent, 5, \ "How much the GC can expand the eden by while the GC locker " \ "is active (as a percentage)") \ \ diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/include/cds.h --- a/src/hotspot/share/include/cds.h Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/include/cds.h Mon Nov 18 12:40:06 2019 -0500 @@ -33,26 +33,29 @@ // // Also, this is a C header file. Do not use C++ here. -#define NUM_CDS_REGIONS 8 // this must be the same as MetaspaceShared::n_regions +#define NUM_CDS_REGIONS 9 // this must be the same as MetaspaceShared::n_regions #define CDS_ARCHIVE_MAGIC 0xf00baba2 #define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8 -#define CURRENT_CDS_ARCHIVE_VERSION 8 +#define CURRENT_CDS_ARCHIVE_VERSION 9 #define INVALID_CDS_ARCHIVE_VERSION -1 struct CDSFileMapRegion { - int _crc; // crc checksum of the current space - size_t _file_offset; // sizeof(this) rounded to vm page size - union { - char* _base; // copy-on-write base address - size_t _offset; // offset from the compressed oop encoding base, only used - // by archive heap space - } _addr; - size_t _used; // for setting space top on read - int _read_only; // read only space? - int _allow_exec; // executable code in space? - void* _oopmap; // bitmap for relocating embedded oops - size_t _oopmap_size_in_bits; - int _is_heap_region; // used in debug build only. + int _crc; // CRC checksum of this region. + int _read_only; // read only region? + int _allow_exec; // executable code in this region? + int _is_heap_region; // Used by SA and debug build. + int _is_bitmap_region; // Relocation bitmap for RO/RW/MC/MD regions (used by SA and debug build). + int _mapped_from_file; // Is this region mapped from a file? + // If false, this region was initialized using os::read(). + size_t _file_offset; // Data for this region starts at this offset in the archive file. + size_t _mapping_offset; // This region should be mapped at this offset from the base address + // - for non-heap regions, the base address is SharedBaseAddress + // - for heap regions, the base address is the compressed oop encoding base + size_t _used; // Number of bytes actually used by this region (excluding padding bytes added + // for alignment purposes).
+ size_t _oopmap_offset; // Bitmap for relocating embedded oops (offset from SharedBaseAddress). + size_t _oopmap_size_in_bits; + char* _mapped_base; // Actually mapped address (NULL if this region is not mapped). }; struct CDSFileMapHeaderBase { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/interpreter/bytecodeInterpreter.cpp --- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2462,8 +2462,8 @@ if (VerifyOops) method->verify(); if (cache->has_appendix()) { - ConstantPool* constants = METHOD->constants(); - SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); + constantPoolHandle cp(THREAD, METHOD->constants()); + SET_STACK_OBJECT(cache->appendix_if_resolved(cp), 0); MORE_STACK(1); } @@ -2493,8 +2493,8 @@ if (VerifyOops) method->verify(); if (cache->has_appendix()) { - ConstantPool* constants = METHOD->constants(); - SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); + constantPoolHandle cp(THREAD, METHOD->constants()); + SET_STACK_OBJECT(cache->appendix_if_resolved(cp), 0); MORE_STACK(1); } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/interpreter/interpreterRuntime.cpp --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -356,7 +356,7 @@ #ifdef CC_INTERP // As legacy note_trap, but we have more arguments. 
JRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci)) - methodHandle trap_method(method); + methodHandle trap_method(thread, method); note_trap_inner(thread, reason, trap_method, trap_bci, THREAD); JRT_END diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/libadt/vectset.cpp --- a/src/hotspot/share/libadt/vectset.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/libadt/vectset.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,62 +26,50 @@ #include "libadt/vectset.hpp" #include "memory/allocation.inline.hpp" #include "memory/arena.hpp" +#include "utilities/count_leading_zeros.hpp" -VectorSet::VectorSet(Arena *arena) { - _set_arena = arena; - size = 2; // Small initial size - data = (uint32_t *)_set_arena->Amalloc(size*sizeof(uint32_t)); - data[0] = 0; // No elements - data[1] = 0; +VectorSet::VectorSet(Arena *arena) : _size(2), + _data(NEW_ARENA_ARRAY(arena, uint32_t, 2)), + _set_arena(arena) { + _data[0] = 0; + _data[1] = 0; } // Expand the existing set to a bigger size -void VectorSet::grow(uint newsize) { - newsize = (newsize+31) >> 5; - uint x = size; - while (x < newsize) { - x <<= 1; - } - data = (uint32_t *)_set_arena->Arealloc(data, size*sizeof(uint32_t), x*sizeof(uint32_t)); - memset((char*)(data + size), 0, (x - size) * sizeof(uint32_t)); - size = x; +void VectorSet::grow(uint new_size) { + new_size = (new_size + bit_mask) >> word_bits; + assert(new_size != 0 && new_size < (1U << 31), ""); + uint x = (1U << 31) >> (count_leading_zeros(new_size) - 1); + _data = REALLOC_ARENA_ARRAY(_set_arena, uint32_t, _data, _size, x); + Copy::zero_to_bytes(_data + _size, (x - _size) * sizeof(uint32_t)); + _size = x; } // Insert a member into an existing Set. void VectorSet::insert(uint elem) { - uint word = elem >> 5; - uint32_t mask = 1L << (elem & 31); - if (word >= size) { + uint32_t word = elem >> word_bits; + uint32_t mask = 1U << (elem & bit_mask); + if (word >= _size) { grow(elem + 1); } - data[word] |= mask; + _data[word] |= mask; } -// Clear a set -void VectorSet::clear() { - if( size > 100 ) { // Reclaim storage only if huge - FREE_RESOURCE_ARRAY(uint32_t,data,size); - size = 2; // Small initial size - data = NEW_RESOURCE_ARRAY(uint32_t,size); - } - memset(data, 0, size*sizeof(uint32_t)); +// Resets the storage +void VectorSet::reset_memory() { + assert(_size >= 2, "_size can never be less than 2"); + _data = REALLOC_ARENA_ARRAY(_set_arena, uint32_t, _data, _size, 2); + _size = 2; + _data[0] = 0; + _data[1] = 0; } // Return true if the set is empty bool VectorSet::is_empty() const { - for (uint32_t i = 0; i < size; i++) { - if (data[i] != 0) { + for (uint32_t i = 0; i < _size; i++) { + if (_data[i] != 0) { return false; } } return true; } - -int VectorSet::hash() const { - uint32_t _xor = 0; - uint lim = ((size < 4) ? 
size : 4); - for (uint i = 0; i < lim; i++) { - _xor ^= data[i]; - } - return (int)_xor; -} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/libadt/vectset.hpp --- a/src/hotspot/share/libadt/vectset.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/libadt/vectset.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,6 +26,7 @@ #define SHARE_LIBADT_VECTSET_HPP #include "memory/allocation.hpp" +#include "utilities/copy.hpp" // Vector Sets @@ -35,26 +36,33 @@ //------------------------------VectorSet-------------------------------------- class VectorSet : public ResourceObj { private: - uint size; // Size of data IN LONGWORDS (32bits) - uint32_t* data; // The data, bit packed - Arena *_set_arena; + + static const uint word_bits = 5; + static const uint bit_mask = 31; + + uint _size; // Size of data in 32-bit words + uint32_t* _data; // The data, bit packed + Arena* _set_arena; void grow(uint newsize); // Grow vector to required bitsize - + void reset_memory(); public: VectorSet(Arena *arena); ~VectorSet() {} + void insert(uint elem); - - void clear(); bool is_empty() const; - int hash() const; void reset() { - memset(data, 0, size*sizeof(uint32_t)); + Copy::zero_to_bytes(_data, _size * sizeof(uint32_t)); } - - // Expose internals for speed-critical fast iterators - uint word_size() const { return size; } + void clear() { + // Reclaim storage if huge + if (_size > 100) { + reset_memory(); + } else { + reset(); + } + } // Fast inlined "test and set". Replaces the idiom: // if (visited.test(idx)) return; @@ -62,46 +70,46 @@ // With: // if (visited.test_set(idx)) return; // - int test_set(uint elem) { - uint word = elem >> 5; // Get the longword offset - if (word >= size) { + bool test_set(uint elem) { + uint32_t word = elem >> word_bits; + if (word >= _size) { // Then grow; set; return 0; this->insert(elem); - return 0; + return false; } - uint32_t mask = 1L << (elem & 31); // Get bit mask - uint32_t datum = data[word] & mask;// Get bit - data[word] |= mask; // Set bit - return datum; // Return bit + uint32_t mask = 1U << (elem & bit_mask); + uint32_t data = _data[word]; + _data[word] = data | mask; + return (data & mask) != 0; } // Fast inlined test - int test(uint elem) const { - uint word = elem >> 5; - if (word >= size) { - return 0; + bool test(uint elem) const { + uint32_t word = elem >> word_bits; + if (word >= _size) { + return false; } - uint32_t mask = 1L << (elem & 31); - return data[word] & mask; + uint32_t mask = 1U << (elem & bit_mask); + return (_data[word] & mask) != 0; } void remove(uint elem) { - uint word = elem >> 5; - if (word >= size) { + uint32_t word = elem >> word_bits; + if (word >= _size) { return; } - uint32_t mask = 1L << (elem & 31); - data[word] &= ~mask; // Clear bit + uint32_t mask = 1U << (elem & bit_mask); + _data[word] &= ~mask; // Clear bit } // Fast inlined set void set(uint elem) { - uint word = elem >> 5; - if (word >= size) { + uint32_t word = elem >> word_bits; + if (word >= _size) { this->insert(elem); } else { - uint32_t mask = 1L << (elem & 31); - data[word] |= mask; + uint32_t mask = 1U << (elem & bit_mask); + _data[word] |= mask; } } }; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/allocation.hpp --- a/src/hotspot/share/memory/allocation.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/allocation.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -283,11 +283,6 @@ _shared_metaspace_top = top; } - static void expand_shared_metaspace_range(void* top) { - assert(top >= _shared_metaspace_top, "must be"); - _shared_metaspace_top = 
top; - } - static void* shared_metaspace_base() { return _shared_metaspace_base; } static void* shared_metaspace_top() { return _shared_metaspace_top; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/archiveUtils.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/memory/archiveUtils.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "memory/archiveUtils.hpp" +#include "memory/metaspace.hpp" +#include "utilities/bitMap.inline.hpp" + +#if INCLUDE_CDS + +CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL; +address* ArchivePtrMarker::_ptr_base; +address* ArchivePtrMarker::_ptr_end; +bool ArchivePtrMarker::_compacted; + +void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) { + assert(_ptrmap == NULL, "initialize only once"); + _ptr_base = ptr_base; + _ptr_end = ptr_end; + _compacted = false; + _ptrmap = ptrmap; + + // Use this as initial guesstimate. We should need less space in the + // archive, but if we're wrong the bitmap will be expanded automatically. + size_t estimated_archive_size = MetaspaceGC::capacity_until_GC(); + // But set it smaller in debug builds so we always test the expansion code. + // (Default archive is about 12MB). + DEBUG_ONLY(estimated_archive_size = 6 * M); + + // We need one bit per pointer in the archive. 
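+ // (One bit per pointer-sized slot: assuming, say, a ~12 MB archive on a 64-bit VM,
+ // this is roughly 12 MB / 8 = ~1.5 million bits, i.e. about 192 KB of bitmap.)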
+ _ptrmap->initialize(estimated_archive_size / sizeof(intptr_t)); +} + +void ArchivePtrMarker::mark_pointer(address* ptr_loc) { + assert(_ptrmap != NULL, "not initialized"); + assert(!_compacted, "cannot mark anymore"); + + if (_ptr_base <= ptr_loc && ptr_loc < _ptr_end) { + address value = *ptr_loc; + if (value != NULL) { + assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses"); + size_t idx = ptr_loc - _ptr_base; + if (_ptrmap->size() <= idx) { + _ptrmap->resize((idx + 1) * 2); + } + assert(idx < _ptrmap->size(), "must be"); + _ptrmap->set_bit(idx); + //tty->print_cr("Marking pointer [%p] -> %p @ " SIZE_FORMAT_W(9), ptr_loc, *ptr_loc, idx); + } + } +} + +class ArchivePtrBitmapCleaner: public BitMapClosure { + CHeapBitMap* _ptrmap; + address* _ptr_base; + address _relocatable_base; + address _relocatable_end; + size_t _max_non_null_offset; + +public: + ArchivePtrBitmapCleaner(CHeapBitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) : + _ptrmap(ptrmap), _ptr_base(ptr_base), + _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {} + + bool do_bit(size_t offset) { + address* ptr_loc = _ptr_base + offset; + address ptr_value = *ptr_loc; + if (ptr_value != NULL) { + assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!"); + if (_max_non_null_offset < offset) { + _max_non_null_offset = offset; + } + } else { + _ptrmap->clear_bit(offset); + DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT "] -> NULL @ " SIZE_FORMAT_W(9), p2i(ptr_loc), offset)); + } + + return true; + } + + size_t max_non_null_offset() const { return _max_non_null_offset; } +}; + +void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) { + assert(!_compacted, "cannot compact again"); + ArchivePtrBitmapCleaner cleaner(_ptrmap, _ptr_base, relocatable_base, relocatable_end); + _ptrmap->iterate(&cleaner); + compact(cleaner.max_non_null_offset()); +} + +void ArchivePtrMarker::compact(size_t max_non_null_offset) { + assert(!_compacted, "cannot compact again"); + _ptrmap->resize(max_non_null_offset + 1); + _compacted = true; +} + +#endif // INCLUDE_CDS diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/archiveUtils.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/memory/archiveUtils.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_MEMORY_ARCHIVEUTILS_HPP +#define SHARE_MEMORY_ARCHIVEUTILS_HPP + +#include "logging/log.hpp" +#include "runtime/arguments.hpp" +#include "utilities/bitMap.hpp" + +// ArchivePtrMarker is used to mark the location of pointers embedded in a CDS archive. E.g., when an +// InstanceKlass k is dumped, we mark the location of the k->_name pointer by effectively calling +// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_ptr_base <= ptr_loc < _ptr_end). _ptr_base is +// fixed, but _ptr_end can be expanded as more objects are dumped. +class ArchivePtrMarker : AllStatic { + static CHeapBitMap* _ptrmap; + static address* _ptr_base; + static address* _ptr_end; + + // Once _ptrmap is compacted, we don't allow bit marking anymore. This is to + // avoid unintentional copy operations after the bitmap has been finalized and written. + static bool _compacted; +public: + static void initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end); + static void mark_pointer(address* ptr_loc); + static void compact(address relocatable_base, address relocatable_end); + static void compact(size_t max_non_null_offset); + + template <typename T> + static void mark_pointer(T* ptr_loc) { + mark_pointer((address*)ptr_loc); + } + + static void expand_ptr_end(address *new_ptr_end) { + assert(_ptr_end <= new_ptr_end, "must be"); + _ptr_end = new_ptr_end; + } + + static CHeapBitMap* ptrmap() { + return _ptrmap; + } +}; + +// SharedDataRelocator is used to shift pointers in the CDS archive. +// +// The CDS archive is basically a contiguous block of memory (divided into several regions) +// that contains multiple objects. The objects may contain direct pointers that point to other objects +// within the archive (e.g., InstanceKlass::_name points to a Symbol in the archive). During dumping, we +// built a bitmap that marks the locations of all these pointers (using ArchivePtrMarker, see comments above). +// +// The contents of the archive assume that it's mapped at the default SharedBaseAddress (e.g. 0x800000000). +// If the archive ends up being mapped at a different address (e.g. 0x810000000), SharedDataRelocator +// is used to shift each marked pointer by a delta (0x10000000 in this example), so that it points to +// the actually mapped location of the target object. +template <bool COMPACTING> +class SharedDataRelocator: public BitMapClosure { + // for all (address** p), where (is_marked(p) && _patch_base <= p && p < _patch_end) { *p += delta; } + + // Patch all pointers within this region that are marked. + address* _patch_base; + address* _patch_end; + + // Before patching, all pointers must point to this region. + address _valid_old_base; + address _valid_old_end; + + // After patching, all pointers must point to this region. + address _valid_new_base; + address _valid_new_end; + + // How much to relocate for each pointer. + intx _delta; + + // The following fields are used only when COMPACTING == true; + // The highest offset (inclusive) in the bitmap that contains a non-null pointer. + // This is used at dump time to reduce the size of the bitmap (which may have been over-allocated).
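+ // (Presumably the same bitmap that ArchivePtrMarker::initialize() above sized from a
+ // guesstimate; trimming it to the last marked bit keeps the written archive small.)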
+ size_t _max_non_null_offset; + CHeapBitMap* _ptrmap; + + public: + SharedDataRelocator(address* patch_base, address* patch_end, + address valid_old_base, address valid_old_end, + address valid_new_base, address valid_new_end, intx delta, + CHeapBitMap* ptrmap = NULL) : + _patch_base(patch_base), _patch_end(patch_end), + _valid_old_base(valid_old_base), _valid_old_end(valid_old_end), + _valid_new_base(valid_new_base), _valid_new_end(valid_new_end), + _delta(delta) { + log_debug(cds, reloc)("SharedDataRelocator::_patch_base = " PTR_FORMAT, p2i(_patch_base)); + log_debug(cds, reloc)("SharedDataRelocator::_patch_end = " PTR_FORMAT, p2i(_patch_end)); + log_debug(cds, reloc)("SharedDataRelocator::_valid_old_base = " PTR_FORMAT, p2i(_valid_old_base)); + log_debug(cds, reloc)("SharedDataRelocator::_valid_old_end = " PTR_FORMAT, p2i(_valid_old_end)); + log_debug(cds, reloc)("SharedDataRelocator::_valid_new_base = " PTR_FORMAT, p2i(_valid_new_base)); + log_debug(cds, reloc)("SharedDataRelocator::_valid_new_end = " PTR_FORMAT, p2i(_valid_new_end)); + if (COMPACTING) { + assert(ptrmap != NULL, "must be"); + _max_non_null_offset = 0; + _ptrmap = ptrmap; + } else { + // Don't touch the _max_non_null_offset and _ptrmap fields. Hopefully a good C++ compiler can + // elide them. + assert(ptrmap == NULL, "must be"); + } + } + + size_t max_non_null_offset() { + assert(COMPACTING, "must be"); + return _max_non_null_offset; + } + + inline bool do_bit(size_t offset); +}; + + +#endif // SHARE_MEMORY_ARCHIVEUTILS_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/archiveUtils.inline.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/memory/archiveUtils.inline.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP +#define SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP + +#include "memory/archiveUtils.hpp" +#include "utilities/bitMap.inline.hpp" + +template <bool COMPACTING> +inline bool SharedDataRelocator<COMPACTING>::do_bit(size_t offset) { + address* p = _patch_base + offset; + assert(_patch_base <= p && p < _patch_end, "must be"); + + address old_ptr = *p; + assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be"); + + if (COMPACTING) { + // Start-up performance: use a template parameter to elide this block for run-time archive + // relocation.
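+ // (COMPACTING is a compile-time template argument, so the compiler can drop this whole
+ // branch in the run-time instantiation.)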
+ assert(Arguments::is_dumping_archive(), "Don't do this during run-time archive loading!"); + if (old_ptr == NULL) { + _ptrmap->clear_bit(offset); + DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT "] -> NULL @ " SIZE_FORMAT_W(9), p2i(p), offset)); + return true; + } else { + _max_non_null_offset = offset; + } + } else { + assert(old_ptr != NULL, "bits for NULL pointers should have been cleaned at dump time"); + } + + address new_ptr = old_ptr + _delta; + assert(_valid_new_base <= new_ptr && new_ptr < _valid_new_end, "must be"); + + DEBUG_ONLY(log_trace(cds, reloc)("Patch2: @%8d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT, + (int)offset, p2i(p), p2i(old_ptr), p2i(new_ptr))); + *p = new_ptr; + return true; // keep iterating +} + +#endif // SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/arena.cpp diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/dynamicArchive.cpp --- a/src/hotspot/share/memory/dynamicArchive.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/dynamicArchive.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -29,12 +29,13 @@ #include "classfile/systemDictionary.hpp" #include "classfile/systemDictionaryShared.hpp" #include "logging/log.hpp" +#include "memory/archiveUtils.inline.hpp" +#include "memory/dynamicArchive.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceClosure.hpp" #include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" -#include "memory/dynamicArchive.hpp" #include "oops/compressedOops.hpp" #include "oops/objArrayKlass.hpp" #include "prims/jvmtiRedefineClasses.hpp" @@ -50,7 +51,6 @@ #endif class DynamicArchiveBuilder : ResourceObj { - CHeapBitMap _ptrmap; static unsigned my_hash(const address& a) { return primitive_hash
<address>(a); } @@ -64,7 +64,7 @@ 16384, ResourceObj::C_HEAP> RelocationTable; _new_loc_table; - intx _buffer_to_target_delta; + static intx _buffer_to_target_delta; DumpRegion* _current_dump_space; @@ -77,10 +77,7 @@ public: void mark_pointer(address* ptr_loc) { - if (is_in_buffer_space(ptr_loc)) { - size_t idx = pointer_delta(ptr_loc, _alloc_bottom, sizeof(address)); - _ptrmap.set_bit(idx); - } + ArchivePtrMarker::mark_pointer(ptr_loc); } DumpRegion* current_dump_space() const { @@ -128,6 +125,28 @@ return pp != NULL; } + static int dynamic_dump_method_comparator(Method* a, Method* b) { + Symbol* a_name = a->name(); + Symbol* b_name = b->name(); + + if (a_name == b_name) { + return 0; + } + + if (!MetaspaceShared::is_in_shared_metaspace(a_name)) { + // a_name points to a Symbol in the top archive. + // When this method is called, a_name is still pointing to the output space. + // Translate it to point to the target space, so that it can be compared with + // Symbols in the base archive. + a_name = (Symbol*)(address(a_name) + _buffer_to_target_delta); + } + if (!MetaspaceShared::is_in_shared_metaspace(b_name)) { + b_name = (Symbol*)(address(b_name) + _buffer_to_target_delta); + } + + return a_name->fast_compare(b_name); + } + protected: enum FollowMode { make_a_copy, point_to_it, set_to_null @@ -240,6 +259,16 @@ return true; // keep recursing until every object is visited exactly once. } + + virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) { + assert(type == _method_entry_ref, "only special type allowed for now"); + address obj = ref->obj(); + address new_obj = _builder->get_new_loc(ref); + size_t offset = pointer_delta(p, obj, sizeof(u1)); + intptr_t* new_p = (intptr_t*)(new_obj + offset); + assert(*p == *new_p, "must be a copy"); + ArchivePtrMarker::mark_pointer((address*)new_p); + } }; class EmbeddedRefUpdater: public MetaspaceClosure { @@ -331,7 +360,7 @@ public: EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} virtual bool do_ref(Ref* ref, bool read_only) { - if (ref->not_null() && _builder->is_in_buffer_space(ref->obj())) { + if (ref->not_null()) { _builder->mark_pointer(ref->addr()); } return false; // Do not recurse. @@ -441,10 +470,10 @@ p2i(obj), p2i(p), bytes, MetaspaceObj::type_name(ref->msotype())); memcpy(p, obj, bytes); - intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p); if (cloned_vtable != NULL) { update_pointer((address*)p, (address)cloned_vtable, "vtb", 0, /*is_mso_pointer*/false); + mark_pointer((address*)p); } return (address)p; @@ -551,6 +580,9 @@ address reserved_bottom = reserve_space_and_init_buffer_to_target_delta(); init_header(reserved_bottom); + CHeapBitMap ptrmap; + ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top()); + verify_estimate_size(sizeof(DynamicArchiveHeader), "header"); log_info(cds, dynamic)("Copying %d klasses and %d symbols", @@ -576,10 +608,6 @@ iterate_roots(&ro_copier); } - size_t bitmap_size = pointer_delta(current_dump_space()->top(), - _alloc_bottom, sizeof(address)); - _ptrmap.initialize(bitmap_size); - { log_info(cds)("Relocating embedded pointers ... "); ResourceMark rm; @@ -653,7 +681,7 @@ it->push(&_symbols->at(i)); } - _header->shared_path_table_metaspace_pointers_do(it); + FileMapInfo::metaspace_pointers_do(it); // Do not call these again, as we have already collected all the classes and symbols // that we want to archive.
Also, these calls would corrupt the tables when @@ -666,6 +694,9 @@ } }; +intx DynamicArchiveBuilder::_buffer_to_target_delta; + + size_t DynamicArchiveBuilder::estimate_archive_size() { // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's _estimated_hashtable_bytes = 0; @@ -688,26 +719,16 @@ address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() { size_t total = estimate_archive_size(); - bool large_pages = false; // No large pages when dumping the CDS archive. - size_t increment = align_up(1*G, reserve_alignment()); - char* addr = (char*)align_up(CompressedKlassPointers::base() + MetaspaceSize + increment, - reserve_alignment()); - - ReservedSpace* rs = MetaspaceShared::reserve_shared_rs( - total, reserve_alignment(), large_pages, addr); - while (!rs->is_reserved() && (addr + increment > addr)) { - addr += increment; - rs = MetaspaceShared::reserve_shared_rs( - total, reserve_alignment(), large_pages, addr); - } - if (!rs->is_reserved()) { + ReservedSpace rs = MetaspaceShared::reserve_shared_space(total); + if (!rs.is_reserved()) { log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total); vm_direct_exit(0); } - address buffer_base = (address)rs->base(); + address buffer_base = (address)rs.base(); log_info(cds, dynamic)("Reserved output buffer space at : " PTR_FORMAT " [%d bytes]", p2i(buffer_base), (int)total); + MetaspaceShared::set_shared_rs(rs); // At run time, we will mmap the dynamic archive at target_space_bottom. // However, at dump time, we may not be able to write into the target_space, @@ -788,6 +809,7 @@ void DynamicArchiveBuilder::make_klasses_shareable() { int i, count = _klasses->length(); + InstanceKlass::disable_method_binary_search(); for (i = 0; i < count; i++) { InstanceKlass* ik = _klasses->at(i); sort_methods(ik); @@ -847,18 +869,24 @@ } #ifdef ASSERT - { + if (ik->methods() != NULL) { for (int m = 0; m < ik->methods()->length(); m++) { Symbol* name = ik->methods()->at(m)->name(); assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be"); } } + if (ik->default_methods() != NULL) { + for (int m = 0; m < ik->default_methods()->length(); m++) { + Symbol* name = ik->default_methods()->at(m)->name(); + assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be"); + } + } #endif Thread* THREAD = Thread::current(); - Method::sort_methods(ik->methods()); + Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator); if (ik->default_methods() != NULL) { - Method::sort_methods(ik->default_methods(), /*set_idnums=*/false); + Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator); } ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail"); ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail"); @@ -902,14 +930,60 @@ } }; - void DynamicArchiveBuilder::relocate_buffer_to_target() { RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta); - _ptrmap.iterate(&patcher); + ArchivePtrMarker::ptrmap()->iterate(&patcher); + + Array* table = FileMapInfo::shared_path_table().table(); + SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size()); + _header->set_shared_path_table(runtime_table); + + address relocatable_base = (address)SharedBaseAddress; + address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta; 
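As an aside, the bitmap-driven relocation performed by relocate_buffer_to_target() above boils down to shifting every marked pointer slot by one constant delta. A minimal stand-alone sketch of that idea (illustrative only; SimpleBitMap and relocate_marked are invented names, not JDK APIs):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One bit per pointer-sized slot in the dump buffer; a set bit means
    // "this slot holds an embedded pointer that must be relocated".
    struct SimpleBitMap {
      std::vector<bool> bits;
      explicit SimpleBitMap(size_t n) : bits(n, false) {}
      void set(size_t i) { bits[i] = true; }
      bool at(size_t i) const { return bits[i]; }
      size_t size() const { return bits.size(); }
    };

    // Shift every marked, non-NULL slot by the same displacement.
    static void relocate_marked(char** slots, const SimpleBitMap& map, intptr_t delta) {
      for (size_t i = 0; i < map.size(); i++) {
        if (map.at(i) && slots[i] != nullptr) {
          slots[i] += delta;
        }
      }
    }
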
+ + intx addr_delta = MetaspaceShared::final_delta(); + if (addr_delta == 0) { + ArchivePtrMarker::compact(relocatable_base, relocatable_end); + } else { + // The base archive is NOT mapped at Arguments::default_SharedBaseAddress() (due to ASLR). + // This means that the current content of the dynamic archive is based on a random + // address. Let's relocate all the pointers, so that it can be mapped to + // Arguments::default_SharedBaseAddress() without runtime relocation. + // + // Note: both the base and dynamic archive are written with + // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress() + + // Patch all pointers that are marked by ptrmap within this region, + // where we have just dumped all the metaspace data. + address patch_base = (address)_alloc_bottom; + address patch_end = (address)current_dump_space()->top(); - Array* table = _header->shared_path_table().table(); - table = to_target(table); - _header->relocate_shared_path_table(table); + // the current value of the pointers to be patched must be within this + // range (i.e., must point to either the top archive (as currently mapped), or to the + // (targeted address of) the top archive) + address valid_old_base = relocatable_base; + address valid_old_end = relocatable_end; + size_t base_plus_top_size = valid_old_end - valid_old_base; + size_t top_size = patch_end - patch_base; + size_t base_size = base_plus_top_size - top_size; + assert(base_plus_top_size > base_size, "no overflow"); + assert(base_plus_top_size > top_size, "no overflow"); + + // after patching, the pointers must point inside this range + // (the requested location of the archive, as mapped at runtime). + address valid_new_base = (address)Arguments::default_SharedBaseAddress(); + address valid_new_end = valid_new_base + base_plus_top_size; + + log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to " + "[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes", + p2i(patch_base + base_size), p2i(patch_end), + p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta); + + SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end, + valid_new_base, valid_new_end, addr_delta, ArchivePtrMarker::ptrmap()); + ArchivePtrMarker::ptrmap()->iterate(&patcher); + ArchivePtrMarker::compact(patcher.max_non_null_offset()); + } } void DynamicArchiveBuilder::write_regions(FileMapInfo* dynamic_info) { @@ -925,6 +999,7 @@ MetaspaceShared::misc_code_dump_space()->base(), MetaspaceShared::misc_code_dump_space()->used(), /*read_only=*/false,/*allow_exec=*/true); + dynamic_info->write_bitmap_region(ArchivePtrMarker::ptrmap()); } void DynamicArchiveBuilder::write_archive(char* serialized_data_start) { @@ -940,6 +1015,7 @@ const char* archive_name = Arguments::GetSharedDynamicArchivePath(); dynamic_info->open_for_write(archive_name); write_regions(dynamic_info); + dynamic_info->set_final_requested_base((char*)Arguments::default_SharedBaseAddress()); dynamic_info->set_header_crc(dynamic_info->compute_header_crc()); dynamic_info->write_header(); dynamic_info->close(); @@ -948,6 +1024,8 @@ address top = address(current_dump_space()->top()) + _buffer_to_target_delta; size_t file_size = pointer_delta(top, base, sizeof(char)); + base += MetaspaceShared::final_delta(); + top += MetaspaceShared::final_delta(); log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]", p2i(base), p2i(top), 
_header->header_size(), file_size); @@ -1036,79 +1114,8 @@ } -static DynamicArchiveHeader *_dynamic_header = NULL; DynamicArchiveBuilder* DynamicArchive::_builder = NULL; -void DynamicArchive::map_failed(FileMapInfo* mapinfo) { - if (mapinfo->dynamic_header() != NULL) { - os::free((void*)mapinfo->dynamic_header()); - } - delete mapinfo; -} - -// Returns the top of the mapped address space -address DynamicArchive::map() { - assert(UseSharedSpaces, "Sanity"); - - // Create the dynamic archive map info - FileMapInfo* mapinfo; - const char* filename = Arguments::GetSharedDynamicArchivePath(); - struct stat st; - address result; - if ((filename != NULL) && (os::stat(filename, &st) == 0)) { - mapinfo = new FileMapInfo(false); - if (!mapinfo->open_for_read(filename)) { - result = NULL; - } - result = map_impl(mapinfo); - if (result == NULL) { - map_failed(mapinfo); - mapinfo->restore_shared_path_table(); - } - } else { - if (filename != NULL) { - log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s", filename); - } - result = NULL; - } - return result; -} - -address DynamicArchive::map_impl(FileMapInfo* mapinfo) { - // Read header - if (!mapinfo->initialize(false)) { - return NULL; - } - - _dynamic_header = mapinfo->dynamic_header(); - int regions[] = {MetaspaceShared::rw, - MetaspaceShared::ro, - MetaspaceShared::mc}; - - size_t len = sizeof(regions)/sizeof(int); - char* saved_base[] = {NULL, NULL, NULL}; - char* top = mapinfo->map_regions(regions, saved_base, len); - if (top == NULL) { - mapinfo->unmap_regions(regions, saved_base, len); - FileMapInfo::fail_continue("Unable to use dynamic archive. Failed map_region for using -Xshare:on."); - return NULL; - } - - if (!validate(mapinfo)) { - return NULL; - } - - if (_dynamic_header == NULL) { - return NULL; - } - - intptr_t* buffer = (intptr_t*)_dynamic_header->serialized_data_start(); - ReadClosure rc(&buffer); - SymbolTable::serialize_shared_table_header(&rc, false); - SystemDictionaryShared::serialize_dictionary_headers(&rc, false); - - return (address)top; -} bool DynamicArchive::validate(FileMapInfo* dynamic_info) { // Check if the recorded base archive matches with the current one @@ -1136,11 +1143,3 @@ } return true; } - -bool DynamicArchive::is_mapped() { - return (_dynamic_header != NULL); -} - -void DynamicArchive::disable() { - _dynamic_header = NULL; -} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/dynamicArchive.hpp --- a/src/hotspot/share/memory/dynamicArchive.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/dynamicArchive.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -99,13 +99,8 @@ // archive? 
static bool is_in_target_space(void *obj); - static address map(); - static bool is_mapped(); + static bool is_mapped() { return FileMapInfo::dynamic_info() != NULL; } static bool validate(FileMapInfo* dynamic_info); - static void disable(); -private: - static address map_impl(FileMapInfo* mapinfo); - static void map_failed(FileMapInfo* mapinfo); }; #endif // INCLUDE_CDS #endif // SHARE_VM_MEMORY_DYNAMICARCHIVE_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/filemap.cpp --- a/src/hotspot/share/memory/filemap.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/filemap.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -34,6 +34,7 @@ #include "logging/log.hpp" #include "logging/logStream.hpp" #include "logging/logMessage.hpp" +#include "memory/archiveUtils.inline.hpp" #include "memory/dynamicArchive.hpp" #include "memory/filemap.hpp" #include "memory/heapShared.inline.hpp" @@ -55,6 +56,7 @@ #include "runtime/vm_version.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" +#include "utilities/bitMap.inline.hpp" #include "utilities/classpathStream.hpp" #include "utilities/defaultStream.hpp" #if INCLUDE_G1GC @@ -69,9 +71,6 @@ #define O_BINARY 0 // otherwise do nothing. #endif -extern address JVM_FunctionAtStart(); -extern address JVM_FunctionAtEnd(); - // Complain and stop. All error conditions occurring during the writing of // an archive file should stop the process. Unrecoverable errors during // the reading of the archive file should stop the process. @@ -104,12 +103,6 @@ void FileMapInfo::fail_continue(const char *msg, ...) { va_list ap; va_start(ap, msg); - if (_dynamic_archive_info == NULL) { - MetaspaceShared::set_archive_loading_failed(); - } else { - // _dynamic_archive_info has been setup after mapping the base archive - DynamicArchive::disable(); - } if (PrintSharedArchiveAndExit && _validating_shared_path_table) { // If we are doing PrintSharedArchiveAndExit and some of the classpath entries // do not validate, we can still continue "limping" to validate the remaining @@ -128,15 +121,6 @@ ls.vprint_cr(msg, ap); } } - if (_dynamic_archive_info == NULL) { - UseSharedSpaces = false; - assert(current_info() != NULL, "singleton must be registered"); - current_info()->close(); - } else { - // We are failing when loading the top archive, but the base archive should - // continue to work. - log_warning(cds, dynamic)("Unable to use shared archive. 
The top archive failed to load: %s", _dynamic_archive_info->_full_path); - } } va_end(ap); } @@ -227,9 +211,7 @@ _narrow_oop_base = CompressedOops::base(); _narrow_oop_shift = CompressedOops::shift(); _max_heap_size = MaxHeapSize; - _narrow_klass_base = CompressedKlassPointers::base(); _narrow_klass_shift = CompressedKlassPointers::shift(); - _shared_path_table = mapinfo->_shared_path_table; if (HeapShared::is_heap_object_archiving_allowed()) { _heap_end = CompressedOops::end(); } @@ -249,11 +231,16 @@ _verify_local = BytecodeVerificationLocal; _verify_remote = BytecodeVerificationRemote; _has_platform_or_app_classes = ClassLoaderExt::has_platform_or_app_classes(); - _shared_base_address = SharedBaseAddress; + _requested_base_address = (char*)SharedBaseAddress; + _mapped_base_address = (char*)SharedBaseAddress; _allow_archiving_with_java_agent = AllowArchivingWithJavaAgent; // the following 2 fields will be set in write_header for dynamic archive header _base_archive_name_size = 0; _base_archive_is_default = false; + + if (!DynamicDumpSharedSpaces) { + set_shared_path_table(mapinfo->_shared_path_table); + } } void SharedClassPathEntry::init_as_non_existent(const char* path, TRAPS) { @@ -615,9 +602,11 @@ return path_array; } -bool FileMapInfo::fail(const char* msg, const char* name) { +bool FileMapInfo::classpath_failure(const char* msg, const char* name) { ClassLoader::trace_class_path(msg, name); - MetaspaceShared::set_archive_loading_failed(); + if (PrintSharedArchiveAndExit) { + MetaspaceShared::set_archive_loading_failed(); + } return false; } @@ -692,7 +681,7 @@ if (mismatch) { // The paths are different - return fail("[BOOT classpath mismatch, actual =", runtime_boot_path); + return classpath_failure("[BOOT classpath mismatch, actual =", runtime_boot_path); } return true; } @@ -703,7 +692,7 @@ int rp_len = num_paths(appcp); bool mismatch = false; if (rp_len < shared_app_paths_len) { - return fail("Run time APP classpath is shorter than the one at dump time: ", appcp); + return classpath_failure("Run time APP classpath is shorter than the one at dump time: ", appcp); } if (shared_app_paths_len != 0 && rp_len != 0) { // Prefix is OK: E.g., dump with -cp foo.jar, but run with -cp foo.jar:bar.jar. @@ -711,7 +700,7 @@ GrowableArray* rp_array = create_path_array(appcp); if (rp_array->length() == 0) { // None of the jar file specified in the runtime -cp exists. - return fail("None of the jar file specified in the runtime -cp exists: -Djava.class.path=", appcp); + return classpath_failure("None of the jar file specified in the runtime -cp exists: -Djava.class.path=", appcp); } // Handling of non-existent entries in the classpath: we eliminate all the non-existent @@ -726,7 +715,7 @@ int j = header()->app_class_paths_start_index(); mismatch = check_paths(j, shared_app_paths_len, rp_array); if (mismatch) { - return fail("[APP classpath mismatch, actual: -Djava.class.path=", appcp); + return classpath_failure("[APP classpath mismatch, actual: -Djava.class.path=", appcp); } } return true; @@ -952,8 +941,8 @@ // Read the FileMapInfo information from the file. -bool FileMapInfo::init_from_file(int fd, bool is_static) { - size_t sz = is_static ? sizeof(FileMapHeader) : sizeof(DynamicArchiveHeader); +bool FileMapInfo::init_from_file(int fd) { + size_t sz = is_static() ? 
sizeof(FileMapHeader) : sizeof(DynamicArchiveHeader); size_t n = os::read(fd, header(), (unsigned int)sz); if (n != sz) { fail_continue("Unable to read the file header."); @@ -965,7 +954,7 @@ return false; } - unsigned int expected_magic = is_static ? CDS_ARCHIVE_MAGIC : CDS_DYNAMIC_ARCHIVE_MAGIC; + unsigned int expected_magic = is_static() ? CDS_ARCHIVE_MAGIC : CDS_DYNAMIC_ARCHIVE_MAGIC; if (header()->magic() != expected_magic) { log_info(cds)("_magic expected: 0x%08x", expected_magic); log_info(cds)(" actual: 0x%08x", header()->magic()); @@ -1016,7 +1005,7 @@ _file_offset = n + header()->base_archive_name_size(); // accounts for the size of _base_archive_name - if (is_static) { + if (is_static()) { // just checking the last region is sufficient since the archive is written // in sequential order size_t len = lseek(fd, 0, SEEK_END); @@ -1026,8 +1015,6 @@ fail_continue("The shared archive file has been truncated."); return false; } - - SharedBaseAddress = header()->shared_base_address(); } return true; @@ -1040,23 +1027,27 @@ } // Read the FileMapInfo information from the file. -bool FileMapInfo::open_for_read(const char* path) { +bool FileMapInfo::open_for_read() { if (_file_open) { return true; } - if (path == NULL) { + if (is_static()) { _full_path = Arguments::GetSharedArchivePath(); } else { - _full_path = path; + _full_path = Arguments::GetSharedDynamicArchivePath(); } int fd = os::open(_full_path, O_RDONLY | O_BINARY, 0); if (fd < 0) { - if (errno == ENOENT) { - // Not locating the shared archive is ok. - fail_continue("Specified shared archive not found (%s).", _full_path); + if (is_static()) { + if (errno == ENOENT) { + // Not locating the shared archive is ok. + fail_continue("Specified shared archive not found (%s).", _full_path); + } else { + fail_continue("Failed to open shared archive file (%s).", + os::strerror(errno)); + } } else { - fail_continue("Failed to open shared archive file (%s).", - os::strerror(errno)); + log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s", _full_path); } return false; } @@ -1127,25 +1118,35 @@ } } -void FileMapRegion::init(bool is_heap_region, char* base, size_t size, bool read_only, +size_t FileMapRegion::used_aligned() const { + return align_up(used(), os::vm_allocation_granularity()); +} + +void FileMapRegion::init(int region_index, char* base, size_t size, bool read_only, bool allow_exec, int crc) { - _is_heap_region = is_heap_region; + _is_heap_region = HeapShared::is_heap_region(region_index); + _is_bitmap_region = (region_index == MetaspaceShared::bm); + _mapping_offset = 0; - if (is_heap_region) { + if (_is_heap_region) { assert(!DynamicDumpSharedSpaces, "must be"); assert((base - (char*)CompressedKlassPointers::base()) % HeapWordSize == 0, "Sanity"); if (base != NULL) { - _addr._offset = (intx)CompressedOops::encode_not_null((oop)base); - } else { - _addr._offset = 0; + _mapping_offset = (size_t)CompressedOops::encode_not_null((oop)base); + assert(_mapping_offset >> 32 == 0, "must be 32-bit only"); } } else { - _addr._base = base; + if (base != NULL) { + assert(base >= (char*)SharedBaseAddress, "must be"); + _mapping_offset = base - (char*)SharedBaseAddress; + } } _used = size; _read_only = read_only; _allow_exec = allow_exec; _crc = crc; + _mapped_from_file = false; + _mapped_base = NULL; } void FileMapInfo::write_region(int region, char* base, size_t size, @@ -1153,25 +1154,47 @@ Arguments::assert_is_dumping_archive(); FileMapRegion* si = space_at(region); - char* target_base = base; - if (DynamicDumpSharedSpaces) 
{ - assert(!HeapShared::is_heap_region(region), "dynamic archive doesn't support heap regions"); - target_base = DynamicArchive::buffer_to_target(base); + char* target_base; + + if (region == MetaspaceShared::bm) { + target_base = NULL; // always NULL for bm region. + } else { + if (DynamicDumpSharedSpaces) { + assert(!HeapShared::is_heap_region(region), "dynamic archive doesn't support heap regions"); + target_base = DynamicArchive::buffer_to_target(base); + } else { + target_base = base; + } } si->set_file_offset(_file_offset); - log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(08) + char* requested_base = (target_base == NULL) ? NULL : target_base + MetaspaceShared::final_delta(); + log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(08) " bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08), - region, size, p2i(target_base), _file_offset); + region, size, p2i(requested_base), _file_offset); int crc = ClassLoader::crc32(0, base, (jint)size); - si->init(HeapShared::is_heap_region(region), target_base, size, read_only, allow_exec, crc); + si->init(region, target_base, size, read_only, allow_exec, crc); if (base != NULL) { write_bytes_aligned(base, size); } } + +void FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap) { + ResourceMark rm; + size_t size_in_bits = ptrmap->size(); + size_t size_in_bytes = ptrmap->size_in_bytes(); + uintptr_t* buffer = (uintptr_t*)NEW_RESOURCE_ARRAY(char, size_in_bytes); + ptrmap->write_to(buffer, size_in_bytes); + header()->set_ptrmap_size_in_bits(size_in_bits); + + log_info(cds)("ptrmap = " INTPTR_FORMAT " (" SIZE_FORMAT " bytes)", + p2i(buffer), size_in_bytes); + write_region(MetaspaceShared::bm, (char*)buffer, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false); +} + // Write out the given archive heap memory regions. GC code combines multiple // consecutive archive GC regions into one MemRegion whenever possible and // produces the 'heap_mem' array. @@ -1229,11 +1252,13 @@ total_size += size; } - log_info(cds)("Archive heap region %d " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes", + log_info(cds)("Archive heap region %d: " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes", i, p2i(start), p2i(start + size), size); write_region(i, start, size, false, false); if (size > 0) { - space_at(i)->init_oopmap(oopmaps->at(arr_idx)._oopmap, + address oopmap = oopmaps->at(arr_idx)._oopmap; + assert(oopmap >= (address)SharedBaseAddress, "must be"); + space_at(i)->init_oopmap(oopmap - (address)SharedBaseAddress, oopmaps->at(arr_idx)._oopmap_size_in_bits); } } @@ -1285,6 +1310,9 @@ align_file_position(); } +void FileMapInfo::set_final_requested_base(char* b) { + header()->set_final_requested_base(b); +} // Close the shared archive file. This does NOT unmap mapped regions. @@ -1331,94 +1359,197 @@ return true; } -// Map the whole region at once, assumed to be allocated contiguously. -ReservedSpace FileMapInfo::reserve_shared_memory() { - char* requested_addr = region_addr(0); - size_t size = FileMapInfo::core_spaces_size(); - - // Reserve the space first, then map otherwise map will go right over some - // other reserved memory (like the code cache). 
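The write_bitmap_region() hunk above persists the pointer bitmap as a plain word array so it can be viewed in place once the "bm" region is mapped back in at run time. A rough sketch of that packing step, under invented names (PersistedBitmap, persist) that are not JDK code:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct PersistedBitmap {
      size_t size_in_bits;              // recorded alongside the region
      std::vector<uintptr_t> words;     // raw payload written into the region
    };

    static PersistedBitmap persist(const std::vector<bool>& bits) {
      const size_t bits_per_word = 8 * sizeof(uintptr_t);
      PersistedBitmap out;
      out.size_in_bits = bits.size();
      out.words.assign((bits.size() + bits_per_word - 1) / bits_per_word, 0);
      for (size_t i = 0; i < bits.size(); i++) {
        if (bits[i]) {
          out.words[i / bits_per_word] |= uintptr_t(1) << (i % bits_per_word);
        }
      }
      return out;
    }
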
- ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr); - if (!rs.is_reserved()) { - fail_continue("Unable to reserve shared space at required address " - INTPTR_FORMAT, p2i(requested_addr)); - return rs; - } - // the reserved virtual memory is for mapping class data sharing archive - MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared); - - return rs; -} - // Memory map a region in the address space. -static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", +static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", "Bitmap", "String1", "String2", "OpenArchive1", "OpenArchive2" }; -char* FileMapInfo::map_regions(int regions[], char* saved_base[], size_t len) { - char* prev_top = NULL; - char* curr_base; - char* curr_top; - int i = 0; - for (i = 0; i < (int)len; i++) { - curr_base = map_region(regions[i], &curr_top); - if (curr_base == NULL) { - return NULL; +MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) { + DEBUG_ONLY(FileMapRegion* last_region = NULL); + intx addr_delta = mapped_base_address - header()->requested_base_address(); + + // Make sure we don't attempt to use header()->mapped_base_address() unless + // it's been successfully mapped. + DEBUG_ONLY(header()->set_mapped_base_address((char*)(uintptr_t)0xdeadbeef);) + + for (int r = 0; r < num_regions; r++) { + int idx = regions[r]; + MapArchiveResult result = map_region(idx, addr_delta, mapped_base_address, rs); + if (result != MAP_ARCHIVE_SUCCESS) { + return result; } - if (i > 0) { - // We require that mc->rw->ro->md to be laid out consecutively, with no - // gaps between them. That way, we can ensure that the OS won't be able to - // allocate any new memory spaces inside _shared_metaspace_{base,top}, which - // would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace(). - assert(curr_base == prev_top, "must be"); - } - log_info(cds)("Mapped region #%d at base %p top %p", regions[i], curr_base, curr_top); - saved_base[i] = curr_base; - prev_top = curr_top; + FileMapRegion* si = space_at(idx); + DEBUG_ONLY(if (last_region != NULL) { + // Ensure that the OS won't be able to allocate new memory spaces between any mapped + // regions, or else it would mess up the simple comparision in MetaspaceObj::is_shared(). + assert(si->mapped_base() == last_region->mapped_end(), "must have no gaps"); + } + last_region = si;) + log_info(cds)("Mapped %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)", is_static() ? "static " : "dynamic", + idx, p2i(si->mapped_base()), p2i(si->mapped_end()), + shared_region_name[idx]); + } - return curr_top; + + DEBUG_ONLY(if (addr_delta == 0 && ArchiveRelocationMode == 1) { + // This is for simulating mmap failures at the requested address. We do it here (instead + // of MetaspaceShared::map_archives) so we can thoroughly test the code for failure handling + // (releasing all allocated resource, etc). 
+ log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); + return MAP_ARCHIVE_MMAP_FAILURE; + }); + + header()->set_mapped_base_address(header()->requested_base_address() + addr_delta); + if (addr_delta != 0 && !relocate_pointers(addr_delta)) { + return MAP_ARCHIVE_OTHER_FAILURE; + } + + return MAP_ARCHIVE_SUCCESS; } -char* FileMapInfo::map_region(int i, char** top_ret) { +bool FileMapInfo::read_region(int i, char* base, size_t size) { + assert(MetaspaceShared::use_windows_memory_mapping(), "used by windows only"); + FileMapRegion* si = space_at(i); + log_info(cds)("Commit %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)%s", + is_static() ? "static " : "dynamic", i, p2i(base), p2i(base + size), + shared_region_name[i], si->allow_exec() ? " exec" : ""); + if (!os::commit_memory(base, size, si->allow_exec())) { + log_error(cds)("Failed to commit %s region #%d (%s)", is_static() ? "static " : "dynamic", + i, shared_region_name[i]); + return false; + } + if (lseek(_fd, (long)si->file_offset(), SEEK_SET) != (int)si->file_offset() || + read_bytes(base, size) != size) { + return false; + } + return true; +} + +MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs) { assert(!HeapShared::is_heap_region(i), "sanity"); FileMapRegion* si = space_at(i); - size_t used = si->used(); - size_t alignment = os::vm_allocation_granularity(); - size_t size = align_up(used, alignment); - char *requested_addr = region_addr(i); + size_t size = si->used_aligned(); + char *requested_addr = mapped_base_address + si->mapping_offset(); + assert(si->mapped_base() == NULL, "must be not mapped yet"); + assert(requested_addr != NULL, "must be specified"); + + si->set_mapped_from_file(false); -#ifdef _WINDOWS - // Windows cannot remap read-only shared memory to read-write when required for - // RedefineClasses, which is also used by JFR. Always map windows regions as RW. - si->set_read_only(false); -#else - // If a tool agent is in use (debugging enabled), or JFR, we must map the address space RW - if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space() || - Arguments::has_jfr_option()) { + if (MetaspaceShared::use_windows_memory_mapping()) { + // Windows cannot remap read-only shared memory to read-write when required for + // RedefineClasses, which is also used by JFR. Always map windows regions as RW. + si->set_read_only(false); + } else if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space() || + Arguments::has_jfr_option()) { + // If a tool agent is in use (debugging enabled), or JFR, we must map the address space RW si->set_read_only(false); + } else if (addr_delta != 0) { + si->set_read_only(false); // Need to patch the pointers + } + + if (rs.is_reserved()) { + assert(rs.contains(requested_addr) && rs.contains(requested_addr + size - 1), "must be"); + MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared); } -#endif // _WINDOWS - // map the contents of the CDS archive in this memory - char *base = os::map_memory(_fd, _full_path, si->file_offset(), - requested_addr, size, si->read_only(), - si->allow_exec()); - if (base == NULL || base != requested_addr) { - fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]); - _memory_mapping_failed = true; - return NULL; + if (MetaspaceShared::use_windows_memory_mapping() && addr_delta != 0) { + // This is the second time we try to map the archive(s). 
We have already created a ReservedSpace + // that covers all the FileMapRegions to ensure all regions can be mapped. However, Windows + // can't mmap into a ReservedSpace, so we just os::read() the data. We're going to patch all the + // regions anyway, so there's no benefit for mmap anyway. + if (!read_region(i, requested_addr, size)) { + return MAP_ARCHIVE_OTHER_FAILURE; // oom or I/O error. + } + } else { + char* base = os::map_memory(_fd, _full_path, si->file_offset(), + requested_addr, size, si->read_only(), + si->allow_exec()); + if (base != requested_addr) { + log_info(cds)("Unable to map %s shared space at required address.", shared_region_name[i]); + _memory_mapping_failed = true; + return MAP_ARCHIVE_MMAP_FAILURE; + } + si->set_mapped_from_file(true); } -#ifdef _WINDOWS - // This call is Windows-only because the memory_type gets recorded for the other platforms - // in method FileMapInfo::reserve_shared_memory(), which is not called on Windows. - MemTracker::record_virtual_memory_type((address)base, mtClassShared); -#endif + si->set_mapped_base(requested_addr); + + if (!rs.is_reserved()) { + // When mapping on Windows with (addr_delta == 0), we don't reserve the address space for the regions + // (Windows can't mmap into a ReservedSpace). In this case, NMT requires we call it after + // os::map_memory has succeeded. + assert(MetaspaceShared::use_windows_memory_mapping(), "Windows memory mapping only"); + MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared); + } if (VerifySharedSpaces && !verify_region_checksum(i)) { + return MAP_ARCHIVE_OTHER_FAILURE; + } + + return MAP_ARCHIVE_SUCCESS; +} + +char* FileMapInfo::map_relocation_bitmap(size_t& bitmap_size) { + FileMapRegion* si = space_at(MetaspaceShared::bm); + bitmap_size = si->used_aligned(); + bool read_only = true, allow_exec = false; + char* requested_addr = NULL; // allow OS to pick any location + char* bitmap_base = os::map_memory(_fd, _full_path, si->file_offset(), + requested_addr, bitmap_size, read_only, allow_exec); + if (bitmap_base == NULL) { + log_error(cds)("failed to map relocation bitmap"); return NULL; } - *top_ret = base + size; - return base; + if (VerifySharedSpaces && !region_crc_check(bitmap_base, bitmap_size, si->crc())) { + log_error(cds)("relocation bitmap CRC error"); + if (!os::unmap_memory(bitmap_base, bitmap_size)) { + fatal("os::unmap_memory of relocation bitmap failed"); + } + return NULL; + } + + return bitmap_base; +} + +bool FileMapInfo::relocate_pointers(intx addr_delta) { + log_debug(cds, reloc)("runtime archive relocation start"); + size_t bitmap_size; + char* bitmap_base = map_relocation_bitmap(bitmap_size); + + if (bitmap_base == NULL) { + return false; + } else { + size_t ptrmap_size_in_bits = header()->ptrmap_size_in_bits(); + log_debug(cds, reloc)("mapped relocation bitmap @ " INTPTR_FORMAT " (" SIZE_FORMAT + " bytes = " SIZE_FORMAT " bits)", + p2i(bitmap_base), bitmap_size, ptrmap_size_in_bits); + + BitMapView ptrmap((BitMap::bm_word_t*)bitmap_base, ptrmap_size_in_bits); + + // Patch all pointers in the the mapped region that are marked by ptrmap. + address patch_base = (address)mapped_base(); + address patch_end = (address)mapped_end(); + + // the current value of the pointers to be patched must be within this + // range (i.e., must be between the requesed base address, and the of the current archive). + // Note: top archive may point to objects in the base archive, but not the other way around. 
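The relocate_pointers() path above patches each marked slot and sanity-checks it against the old and new address ranges. A compact sketch of that kind of bounds-checked patcher (SimpleRelocator is an invented name for illustration; the class actually introduced by this changeset is SharedDataRelocator):

    #include <cassert>
    #include <cstdint>

    struct SimpleRelocator {
      char* old_base; char* old_end;   // where marked pointers must point before patching
      char* new_base; char* new_end;   // where they must land after patching
      intptr_t delta;

      void patch(char** slot) const {
        char* old_ptr = *slot;
        if (old_ptr == nullptr) return;                    // NULLs stay NULL
        assert(old_base <= old_ptr && old_ptr < old_end);
        char* new_ptr = old_ptr + delta;
        assert(new_base <= new_ptr && new_ptr < new_end);
        *slot = new_ptr;
      }
    };
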
+ address valid_old_base = (address)header()->requested_base_address(); + address valid_old_end = valid_old_base + mapping_end_offset(); + + // after patching, the pointers must point inside this range + // (the requested location of the archive, as mapped at runtime). + address valid_new_base = (address)header()->mapped_base_address(); + address valid_new_end = (address)mapped_end(); + + SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end, + valid_new_base, valid_new_end, addr_delta); + ptrmap.iterate(&patcher); + + if (!os::unmap_memory(bitmap_base, bitmap_size)) { + fatal("os::unmap_memory of relocation bitmap failed"); + } + log_debug(cds, reloc)("runtime archive relocation done"); + return true; + } } size_t FileMapInfo::read_bytes(void* buffer, size_t count) { @@ -1434,10 +1565,13 @@ } address FileMapInfo::decode_start_address(FileMapRegion* spc, bool with_current_oop_encoding_mode) { + size_t offset = spc->mapping_offset(); + assert((offset >> 32) == 0, "must be 32-bit only"); + uint n = (uint)offset; if (with_current_oop_encoding_mode) { - return (address)CompressedOops::decode_not_null(spc->offset()); + return (address)CompressedOops::decode_not_null(n); } else { - return (address)HeapShared::decode_from_archive(spc->offset()); + return (address)HeapShared::decode_from_archive(n); } } @@ -1705,7 +1839,7 @@ int first_region_idx) { for (int i=0; ioopmap(), + HeapShared::patch_archived_heap_embedded_pointers(ranges[i], (address)(SharedBaseAddress + si->oopmap_offset()), si->oopmap_size_in_bits()); } } @@ -1759,11 +1893,10 @@ } } -void FileMapInfo::unmap_regions(int regions[], char* saved_base[], size_t len) { - for (int i = 0; i < (int)len; i++) { - if (saved_base[i] != NULL) { - unmap_region(regions[i]); - } +void FileMapInfo::unmap_regions(int regions[], int num_regions) { + for (int r = 0; r < num_regions; r++) { + int idx = regions[r]; + unmap_region(idx); } } @@ -1772,16 +1905,17 @@ void FileMapInfo::unmap_region(int i) { assert(!HeapShared::is_heap_region(i), "sanity"); FileMapRegion* si = space_at(i); + char* mapped_base = si->mapped_base(); size_t used = si->used(); size_t size = align_up(used, os::vm_allocation_granularity()); - if (used == 0) { - return; - } - - char* addr = region_addr(i); - if (!os::unmap_memory(addr, size)) { - fail_stop("Unable to unmap shared space."); + if (mapped_base != NULL && size > 0 && si->mapped_from_file()) { + log_info(cds)("Unmapping region #%d at base " INTPTR_FORMAT " (%s)", i, p2i(mapped_base), + shared_region_name[i]); + if (!os::unmap_memory(mapped_base, size)) { + fatal("os::unmap_memory failed"); + } + si->set_mapped_base(NULL); } } @@ -1813,7 +1947,7 @@ // [1] validate_header() - done here. // [2] validate_shared_path_table - this is done later, because the table is in the RW // region of the archive, which is not mapped yet. -bool FileMapInfo::initialize(bool is_static) { +bool FileMapInfo::initialize() { assert(UseSharedSpaces, "UseSharedSpaces expected."); if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) { @@ -1828,11 +1962,10 @@ if (!open_for_read()) { return false; } - - init_from_file(_fd, is_static); - // UseSharedSpaces could be disabled if the checking of some of the header fields in - // init_from_file has failed. - if (!UseSharedSpaces || !validate_header(is_static)) { + if (!init_from_file(_fd)) { + return false; + } + if (!validate_header()) { return false; } return true; @@ -1845,10 +1978,18 @@ return si->used() > 0 ? 
(char*)start_address_as_decoded_with_current_oop_encoding_mode(si) : NULL; } else { - return si->base(); + return si->mapped_base(); } } +FileMapRegion* FileMapInfo::first_core_space() const { + return is_static() ? space_at(MetaspaceShared::mc) : space_at(MetaspaceShared::rw); +} + +FileMapRegion* FileMapInfo::last_core_space() const { + return is_static() ? space_at(MetaspaceShared::md) : space_at(MetaspaceShared::mc); +} + int FileMapHeader::compute_crc() { char* start = (char*)this; // start computing from the field after _crc @@ -1860,7 +2001,6 @@ // This function should only be called during run time with UseSharedSpaces enabled. bool FileMapHeader::validate() { - if (_obj_alignment != ObjectAlignmentInBytes) { FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d" " does not equal the current ObjectAlignmentInBytes of " INTX_FORMAT ".", @@ -1913,7 +2053,7 @@ return true; } -bool FileMapInfo::validate_header(bool is_static) { +bool FileMapInfo::validate_header() { return header()->validate(); } @@ -1932,18 +2072,14 @@ // Unmap mapped regions of shared space. void FileMapInfo::stop_sharing_and_unmap(const char* msg) { - MetaspaceShared::set_shared_metaspace_range(NULL, NULL); + MetaspaceShared::set_shared_metaspace_range(NULL, NULL, NULL); FileMapInfo *map_info = FileMapInfo::current_info(); if (map_info) { map_info->fail_continue("%s", msg); for (int i = 0; i < MetaspaceShared::num_non_heap_spaces; i++) { if (!HeapShared::is_heap_region(i)) { - char *addr = map_info->region_addr(i); - if (addr != NULL) { - map_info->unmap_region(i); - map_info->space_at(i)->mark_invalid(); - } + map_info->unmap_region(i); } } // Dealloc the archive heap regions only without unmapping. The regions are part diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/filemap.hpp --- a/src/hotspot/share/memory/filemap.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/filemap.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -43,6 +43,8 @@ static const int JVM_IDENT_MAX = 256; +class CHeapBitMap; + class SharedClassPathEntry { enum { modules_image_entry, @@ -104,6 +106,9 @@ Array* _table; int _size; public: + SharedPathTable() : _table(NULL), _size(0) {} + SharedPathTable(Array* table, int size) : _table(table), _size(size) {} + void dumptime_init(ClassLoaderData* loader_data, Thread* THREAD); void metaspace_pointers_do(MetaspaceClosure* it); @@ -138,25 +143,29 @@ } // Accessors - int crc() const { return _crc; } - size_t file_offset() const { return _file_offset; } - char* base() const { assert_is_not_heap_region(); return _addr._base; } - narrowOop offset() const { assert_is_heap_region(); return (narrowOop)(_addr._offset); } - size_t used() const { return _used; } - bool read_only() const { return _read_only != 0; } - bool allow_exec() const { return _allow_exec != 0; } - void* oopmap() const { return _oopmap; } - size_t oopmap_size_in_bits() const { return _oopmap_size_in_bits; } + int crc() const { return _crc; } + size_t file_offset() const { return _file_offset; } + size_t mapping_offset() const { return _mapping_offset; } + size_t mapping_end_offset() const { return _mapping_offset + used_aligned(); } + size_t used() const { return _used; } + size_t used_aligned() const; // aligned up to os::vm_allocation_granularity() + char* mapped_base() const { assert_is_not_heap_region(); return _mapped_base; } + char* mapped_end() const { return mapped_base() + used_aligned(); } + bool read_only() const { return _read_only != 0; } + bool allow_exec() const { return 
_allow_exec != 0; } + bool mapped_from_file() const { return _mapped_from_file != 0; } + size_t oopmap_offset() const { assert_is_heap_region(); return _oopmap_offset; } + size_t oopmap_size_in_bits() const { assert_is_heap_region(); return _oopmap_size_in_bits; } - void set_file_offset(size_t s) { _file_offset = s; } - void set_read_only(bool v) { _read_only = v; } - void mark_invalid() { _addr._base = NULL; } - - void init(bool is_heap_region, char* base, size_t size, bool read_only, + void set_file_offset(size_t s) { _file_offset = s; } + void set_read_only(bool v) { _read_only = v; } + void set_mapped_base(char* p) { _mapped_base = p; } + void set_mapped_from_file(bool v) { _mapped_from_file = v; } + void init(int region_index, char* base, size_t size, bool read_only, bool allow_exec, int crc); - void init_oopmap(void* map, size_t size_in_bits) { - _oopmap = map; + void init_oopmap(size_t oopmap_offset, size_t size_in_bits) { + _oopmap_offset = oopmap_offset; _oopmap_size_in_bits = size_in_bits; } }; @@ -178,13 +187,10 @@ uintx _max_heap_size; // java max heap size during dumping CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode int _narrow_klass_shift; // save narrow klass base and shift - address _narrow_klass_base; - char* _misc_data_patching_start; - char* _serialized_data_start; // Data accessed using {ReadClosure,WriteClosure}::serialize() - address _i2i_entry_code_buffers; + size_t _misc_data_patching_offset; + size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize() + size_t _i2i_entry_code_buffers_offset; size_t _i2i_entry_code_buffers_size; - size_t _core_spaces_size; // number of bytes allocated by the core spaces - // (mc, md, ro, rw and od). address _heap_end; // heap end at dump time. bool _base_archive_is_default; // indicates if the base archive is the system default one @@ -202,7 +208,8 @@ // check_nonempty_dir_in_shared_path_table() // validate_shared_path_table() // validate_non_existent_class_paths() - SharedPathTable _shared_path_table; + size_t _shared_path_table_offset; + int _shared_path_table_size; jshort _app_class_paths_start_index; // Index of first app classpath entry jshort _app_module_paths_start_index; // Index of first module path entry @@ -211,9 +218,19 @@ bool _verify_local; // BytecodeVerificationLocal setting bool _verify_remote; // BytecodeVerificationRemote setting bool _has_platform_or_app_classes; // Archive contains app classes - size_t _shared_base_address; // SharedBaseAddress used at dump time + char* _requested_base_address; // Archive relocation is not necessary if we map with this base address. + char* _mapped_base_address; // Actual base address where archive is mapped. 
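The switch from raw pointers to *_offset fields in FileMapHeader (seen just above) keeps the header meaningful regardless of where the archive ends up being mapped. A toy illustration of the same pattern, with made-up names that are not part of this changeset:

    #include <cassert>
    #include <cstddef>

    struct HeaderSketch {
      char*  mapped_base;      // filled in after a successful map
      size_t payload_offset;   // stored in the file, relative to the base

      void set_payload(char* p) {
        assert(p >= mapped_base);
        payload_offset = static_cast<size_t>(p - mapped_base);
      }
      char* payload() const { return mapped_base + payload_offset; }
    };
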
+ bool _allow_archiving_with_java_agent; // setting of the AllowArchivingWithJavaAgent option + size_t _ptrmap_size_in_bits; // Size of pointer relocation bitmap + char* from_mapped_offset(size_t offset) const { + return mapped_base_address() + offset; + } + void set_mapped_offset(char* p, size_t *offset) { + assert(p >= mapped_base_address(), "sanity"); + *offset = p - mapped_base_address(); + } public: // Accessors -- fields declared in CDSFileMapHeaderBase unsigned int magic() const {return _magic;} @@ -234,19 +251,19 @@ uintx max_heap_size() const { return _max_heap_size; } CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; } int narrow_klass_shift() const { return _narrow_klass_shift; } - address narrow_klass_base() const { return _narrow_klass_base; } - char* misc_data_patching_start() const { return _misc_data_patching_start; } - char* serialized_data_start() const { return _serialized_data_start; } - address i2i_entry_code_buffers() const { return _i2i_entry_code_buffers; } + address narrow_klass_base() const { return (address)mapped_base_address(); } + char* misc_data_patching_start() const { return from_mapped_offset(_misc_data_patching_offset); } + char* serialized_data_start() const { return from_mapped_offset(_serialized_data_offset); } + address i2i_entry_code_buffers() const { return (address)from_mapped_offset(_i2i_entry_code_buffers_offset); } size_t i2i_entry_code_buffers_size() const { return _i2i_entry_code_buffers_size; } - size_t core_spaces_size() const { return _core_spaces_size; } address heap_end() const { return _heap_end; } bool base_archive_is_default() const { return _base_archive_is_default; } const char* jvm_ident() const { return _jvm_ident; } size_t base_archive_name_size() const { return _base_archive_name_size; } - size_t shared_base_address() const { return _shared_base_address; } + char* requested_base_address() const { return _requested_base_address; } + char* mapped_base_address() const { return _mapped_base_address; } bool has_platform_or_app_classes() const { return _has_platform_or_app_classes; } - SharedPathTable shared_path_table() const { return _shared_path_table; } + size_t ptrmap_size_in_bits() const { return _ptrmap_size_in_bits; } // FIXME: These should really return int jshort max_used_path_index() const { return _max_used_path_index; } @@ -254,27 +271,32 @@ jshort app_class_paths_start_index() const { return _app_class_paths_start_index; } jshort num_module_paths() const { return _num_module_paths; } - void set_core_spaces_size(size_t s) { _core_spaces_size = s; } void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; } - void set_misc_data_patching_start(char* p) { _misc_data_patching_start = p; } - void set_serialized_data_start(char* p) { _serialized_data_start = p; } + void set_misc_data_patching_start(char* p) { set_mapped_offset(p, &_misc_data_patching_offset); } + void set_serialized_data_start(char* p) { set_mapped_offset(p, &_serialized_data_offset); } void set_base_archive_name_size(size_t s) { _base_archive_name_size = s; } void set_base_archive_is_default(bool b) { _base_archive_is_default = b; } void set_header_size(size_t s) { _header_size = s; } - + void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; } + void set_mapped_base_address(char* p) { _mapped_base_address = p; } void set_i2i_entry_code_buffers(address p, size_t s) { - _i2i_entry_code_buffers = p; + set_mapped_offset((char*)p, &_i2i_entry_code_buffers_offset); _i2i_entry_code_buffers_size = s; } - void 
relocate_shared_path_table(Array* t) { - assert(DynamicDumpSharedSpaces, "only"); - _shared_path_table.set_table(t); + void set_shared_path_table(SharedPathTable table) { + set_mapped_offset((char*)table.table(), &_shared_path_table_offset); + _shared_path_table_size = table.size(); } - void shared_path_table_metaspace_pointers_do(MetaspaceClosure* it) { - assert(DynamicDumpSharedSpaces, "only"); - _shared_path_table.metaspace_pointers_do(it); + void set_final_requested_base(char* b) { + _requested_base_address = b; + _mapped_base_address = 0; + } + + SharedPathTable shared_path_table() const { + return SharedPathTable((Array*)from_mapped_offset(_shared_path_table_offset), + _shared_path_table_size); } bool validate(); @@ -301,6 +323,7 @@ bool _is_static; bool _file_open; + bool _is_mapped; int _fd; size_t _file_offset; const char* _full_path; @@ -327,8 +350,11 @@ static bool get_base_archive_name_from_header(const char* archive_name, int* size, char** base_archive_name); static bool check_archive(const char* archive_name, bool is_static); + static SharedPathTable shared_path_table() { + return _shared_path_table; + } void restore_shared_path_table(); - bool init_from_file(int fd, bool is_static); + bool init_from_file(int fd); static void metaspace_pointers_do(MetaspaceClosure* it); void log_paths(const char* msg, int start_idx, int end_idx); @@ -341,7 +367,7 @@ void set_header_crc(int crc) { header()->set_crc(crc); } int space_crc(int i) const { return space_at(i)->crc(); } void populate_header(size_t alignment); - bool validate_header(bool is_static); + bool validate_header(); void invalidate(); int crc() const { return header()->crc(); } int version() const { return header()->version(); } @@ -370,11 +396,17 @@ header()->set_i2i_entry_code_buffers(addr, s); } - void set_core_spaces_size(size_t s) const { header()->set_core_spaces_size(s); } - size_t core_spaces_size() const { return header()->core_spaces_size(); } + bool is_static() const { return _is_static; } + bool is_mapped() const { return _is_mapped; } + void set_is_mapped(bool v) { _is_mapped = v; } + const char* full_path() const { return _full_path; } + void set_final_requested_base(char* b); + + char* requested_base_address() const { return header()->requested_base_address(); } + class DynamicArchiveHeader* dynamic_header() const { - assert(!_is_static, "must be"); + assert(!is_static(), "must be"); return (DynamicArchiveHeader*)header(); } @@ -402,21 +434,21 @@ static void assert_mark(bool check); // File manipulation. 
- bool initialize(bool is_static) NOT_CDS_RETURN_(false); - bool open_for_read(const char* path = NULL); + bool initialize() NOT_CDS_RETURN_(false); + bool open_for_read(); void open_for_write(const char* path = NULL); void write_header(); void write_region(int region, char* base, size_t size, bool read_only, bool allow_exec); + void write_bitmap_region(const CHeapBitMap* ptrmap); size_t write_archive_heap_regions(GrowableArray *heap_mem, GrowableArray *oopmaps, int first_region_id, int max_num_regions); void write_bytes(const void* buffer, size_t count); void write_bytes_aligned(const void* buffer, size_t count); size_t read_bytes(void* buffer, size_t count); - char* map_regions(int regions[], char* saved_base[], size_t len); - char* map_region(int i, char** top_ret); - void map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN; + MapArchiveResult map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs); + void unmap_regions(int regions[], int num_regions); void map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN; void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN; void patch_archived_heap_embedded_pointers() NOT_CDS_JAVA_HEAP_RETURN; @@ -424,7 +456,6 @@ int first_region_idx) NOT_CDS_JAVA_HEAP_RETURN; bool has_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false); MemRegion get_heap_regions_range_with_current_oop_encoding_mode() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion()); - void unmap_regions(int regions[], char* saved_base[], size_t len); void unmap_region(int i); bool verify_region_checksum(int i); void close(); @@ -452,6 +483,9 @@ static void check_nonempty_dir_in_shared_path_table(); bool validate_shared_path_table(); void validate_non_existent_class_paths(); + static void set_shared_path_table(FileMapInfo* info) { + _shared_path_table = info->header()->shared_path_table(); + } static void update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry* ent, TRAPS); static int num_non_existent_class_paths(); static void record_non_existent_class_path_entry(const char* path); @@ -475,12 +509,28 @@ char* region_addr(int idx); + // The offset of the first core region in the archive, relative to SharedBaseAddress + size_t mapping_base_offset() const { return first_core_space()->mapping_offset(); } + // The offset of the (exclusive) end of the last core region in this archive, relative to SharedBaseAddress + size_t mapping_end_offset() const { return last_core_space()->mapping_end_offset(); } + + char* mapped_base() const { return first_core_space()->mapped_base(); } + char* mapped_end() const { return last_core_space()->mapped_end(); } + + // Non-zero if the archive needs to be mapped a non-default location due to ASLR. 
+ intx relocation_delta() const { + return header()->mapped_base_address() - header()->requested_base_address(); + } + + FileMapRegion* first_core_space() const; + FileMapRegion* last_core_space() const; + private: void seek_to_position(size_t pos); char* skip_first_path_entry(const char* path) NOT_CDS_RETURN_(NULL); int num_paths(const char* path) NOT_CDS_RETURN_(0); GrowableArray* create_path_array(const char* path) NOT_CDS_RETURN_(NULL); - bool fail(const char* msg, const char* name) NOT_CDS_RETURN_(false); + bool classpath_failure(const char* msg, const char* name) NOT_CDS_RETURN_(false); bool check_paths(int shared_path_start_idx, int num_paths, GrowableArray* rp_array) NOT_CDS_RETURN_(false); bool validate_boot_class_paths() NOT_CDS_RETURN_(false); @@ -489,6 +539,11 @@ bool is_open = false) NOT_CDS_JAVA_HEAP_RETURN_(false); bool region_crc_check(char* buf, size_t size, int expected_crc) NOT_CDS_RETURN_(false); void dealloc_archive_heap_regions(MemRegion* regions, int num, bool is_open) NOT_CDS_JAVA_HEAP_RETURN; + void map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN; + char* map_relocation_bitmap(size_t& bitmap_size); + MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs); + bool read_region(int i, char* base, size_t size); + bool relocate_pointers(intx addr_delta); FileMapRegion* space_at(int i) const { return header()->space_at(i); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/heapShared.cpp --- a/src/hotspot/share/memory/heapShared.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/heapShared.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -26,15 +26,18 @@ #include "classfile/javaClasses.inline.hpp" #include "classfile/stringTable.hpp" #include "classfile/symbolTable.hpp" +#include "classfile/systemDictionaryShared.hpp" #include "classfile/vmSymbols.hpp" #include "logging/log.hpp" #include "logging/logMessage.hpp" #include "logging/logStream.hpp" +#include "memory/archiveUtils.hpp" #include "memory/filemap.hpp" #include "memory/heapShared.inline.hpp" #include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceClosure.hpp" +#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.inline.hpp" @@ -383,8 +386,13 @@ _k->external_name(), i, subgraph_k->external_name()); } _subgraph_object_klasses->at_put(i, subgraph_k); + ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i)); } } + + ArchivePtrMarker::mark_pointer(&_k); + ArchivePtrMarker::mark_pointer(&_entry_field_records); + ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses); } struct CopyKlassSubGraphInfoToArchive : StackObj { @@ -397,7 +405,7 @@ (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord)); record->init(&info); - unsigned int hash = primitive_hash(klass); + unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass); u4 delta = MetaspaceShared::object_delta_u4(record); _writer->add(hash, delta); } @@ -436,7 +444,7 @@ } assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); - unsigned int hash = primitive_hash(k); + unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k); const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); // Initialize from archived data. 
Currently this is done only @@ -606,8 +614,20 @@ assert(orig_obj != NULL, "must be"); assert(!is_archived_object(orig_obj), "sanity"); - // java.lang.Class instances cannot be included in an archived - // object sub-graph. + if (!JavaClasses::is_supported_for_archiving(orig_obj)) { + // This object has injected fields that cannot be supported easily, so we disallow them for now. + // If you get an error here, you probably made a change in the JDK library that has added + // these objects that are referenced (directly or indirectly) by static fields. + ResourceMark rm; + log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name()); + vm_exit(1); + } + + // java.lang.Class instances cannot be included in an archived object sub-graph. We only support + // them as Klass::_archived_mirror because they need to be specially restored at run time. + // + // If you get an error here, you probably made a change in the JDK library that has added a Class + // object that is referenced (directly or indirectly) by static fields. if (java_lang_Class::is_instance(orig_obj)) { log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level); vm_exit(1); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/metaspace.cpp --- a/src/hotspot/share/memory/metaspace.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/metaspace.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -975,25 +975,18 @@ #ifdef _LP64 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); -void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { +void Metaspace::set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base) { assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class."); // Figure out the narrow_klass_base and the narrow_klass_shift. The // narrow_klass_base is the lower of the metaspace base and the cds base // (if cds is enabled). The narrow_klass_shift depends on the distance // between the lower base and higher address. - address lower_base; - address higher_address; -#if INCLUDE_CDS - if (UseSharedSpaces) { - higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()), - (address)(metaspace_base + compressed_class_space_size())); - lower_base = MIN2(metaspace_base, cds_base); - } else -#endif - { - higher_address = metaspace_base + compressed_class_space_size(); - lower_base = metaspace_base; - + address lower_base = (address)metaspace_rs.base(); + address higher_address = (address)metaspace_rs.end(); + if (cds_base != NULL) { + assert(UseSharedSpaces, "must be"); + lower_base = MIN2(lower_base, cds_base); + } else { uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; // If compressed class space fits in lower 32G, we don't need a base. if (higher_address <= (address)klass_encoding_max) { @@ -1018,21 +1011,8 @@ AOTLoader::set_narrow_klass_shift(); } -#if INCLUDE_CDS -// Return TRUE if the specified metaspace_base and cds_base are close enough -// to work with compressed klass pointers. 
-bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { - assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); - assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); - address lower_base = MIN2((address)metaspace_base, cds_base); - address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()), - (address)(metaspace_base + compressed_class_space_size())); - return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); -} -#endif - // Try to allocate the metaspace at the requested addr. -void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { +void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base) { assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class."); assert(using_class_space(), "called improperly"); assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); @@ -1045,14 +1025,16 @@ // Don't use large pages for the class space. bool large_pages = false; + if (metaspace_rs.is_reserved()) { + // CDS should have already reserved the space. + assert(requested_addr == NULL, "not used"); + assert(cds_base != NULL, "CDS should have already reserved the memory space"); + } else { + assert(cds_base == NULL, "must be"); #if !(defined(AARCH64) || defined(AIX)) - ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), - _reserve_alignment, - large_pages, - requested_addr); + metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment, + large_pages, requested_addr); #else // AARCH64 - ReservedSpace metaspace_rs; - // Our compressed klass pointers may fit nicely into the lower 32 // bits. if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) { @@ -1077,19 +1059,6 @@ increment = 4*G; } -#if INCLUDE_CDS - if (UseSharedSpaces - && ! can_use_cds_with_metaspace_addr(a, cds_base)) { - // We failed to find an aligned base that will reach. Fall - // back to using our requested addr. - metaspace_rs = ReservedSpace(compressed_class_space_size(), - _reserve_alignment, - large_pages, - requested_addr); - break; - } -#endif - metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment, large_pages, @@ -1098,53 +1067,30 @@ break; } } - #endif // AARCH64 + } if (!metaspace_rs.is_reserved()) { -#if INCLUDE_CDS - if (UseSharedSpaces) { - size_t increment = align_up(1*G, _reserve_alignment); - - // Keep trying to allocate the metaspace, increasing the requested_addr - // by 1GB each time, until we reach an address that will no longer allow - // use of CDS with compressed klass pointers. - char *addr = requested_addr; - while (!metaspace_rs.is_reserved() && (addr + increment > addr) && - can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { - addr = addr + increment; - metaspace_rs = ReservedSpace(compressed_class_space_size(), - _reserve_alignment, large_pages, addr); - } - } -#endif + assert(cds_base == NULL, "CDS should have already reserved the memory space"); // If no successful allocation then try to allocate the space anywhere. If // that fails then OOM doom. At this point we cannot try allocating the // metaspace as if UseCompressedClassPointers is off because too much // initialization has happened that depends on UseCompressedClassPointers. // So, UseCompressedClassPointers cannot be turned off at this point. 
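The reservation order used here is: take the range CDS has already reserved if there is one, otherwise try the requested address, otherwise let the OS pick an address, and exit the VM if even that fails. The following is only a simplified standalone sketch of that fallback chain using raw POSIX mmap on a 64-bit Linux-like system; the size and requested address are made-up example values, and ReservedSpace's alignment and commit handling are not modelled.

  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdio>

  // Reserve (not commit) address space, failing if a requested address cannot be honoured,
  // roughly mirroring an attempt_reserve_memory_at-style call.
  static void* try_reserve(void* requested, size_t size) {
    void* p = mmap(requested, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return nullptr;
    if (requested != nullptr && p != requested) {
      munmap(p, size);            // hint was not honoured; treat as a failed attempt
      return nullptr;
    }
    return p;
  }

  int main() {
    const size_t size = 128 * 1024 * 1024;       // stand-in for compressed_class_space_size()
    void* requested = (void*)0x800000000ull;     // hypothetical requested address (64-bit assumed)

    void* base = try_reserve(requested, size);   // 1) try the requested address
    if (base == nullptr) {
      base = try_reserve(nullptr, size);         // 2) try anywhere the OS likes
    }
    if (base == nullptr) {
      std::fprintf(stderr, "could not reserve class space\n");  // 3) the VM exits here
      return 1;
    }
    std::printf("class space reserved at %p\n", base);
    return 0;
  }
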
+ metaspace_rs = ReservedSpace(compressed_class_space_size(), + _reserve_alignment, large_pages); if (!metaspace_rs.is_reserved()) { - metaspace_rs = ReservedSpace(compressed_class_space_size(), - _reserve_alignment, large_pages); - if (!metaspace_rs.is_reserved()) { - vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", - compressed_class_space_size())); - } + vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes", + compressed_class_space_size())); } } - // If we got here then the metaspace got allocated. - MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); + if (cds_base == NULL) { + // If we got here then the metaspace got allocated. + MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); + } -#if INCLUDE_CDS - // Verify that we can use shared spaces. Otherwise, turn off CDS. - if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { - FileMapInfo::stop_sharing_and_unmap( - "Could not allocate metaspace at a compatible address"); - } -#endif - set_narrow_klass_base_and_shift((address)metaspace_rs.base(), - UseSharedSpaces ? (address)cds_base : 0); + set_narrow_klass_base_and_shift(metaspace_rs, cds_base); initialize_class_space(metaspace_rs); @@ -1247,31 +1193,30 @@ void Metaspace::global_initialize() { MetaspaceGC::initialize(); + bool class_space_inited = false; #if INCLUDE_CDS if (DumpSharedSpaces) { MetaspaceShared::initialize_dumptime_shared_and_meta_spaces(); + class_space_inited = true; } else if (UseSharedSpaces) { // If any of the archived space fails to map, UseSharedSpaces - // is reset to false. Fall through to the - // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class - // metaspace. + // is reset to false. MetaspaceShared::initialize_runtime_shared_and_meta_spaces(); + class_space_inited = UseSharedSpaces; } if (DynamicDumpSharedSpaces && !UseSharedSpaces) { vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL); } +#endif // INCLUDE_CDS - if (!DumpSharedSpaces && !UseSharedSpaces) -#endif // INCLUDE_CDS - { #ifdef _LP64 - if (using_class_space()) { - char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment); - allocate_metaspace_compressed_klass_ptrs(base, 0); - } -#endif // _LP64 + if (using_class_space() && !class_space_inited) { + char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment); + ReservedSpace dummy; + allocate_metaspace_compressed_klass_ptrs(dummy, base, 0); } +#endif // Initialize these before initializing the VirtualSpaceList _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/metaspace.hpp --- a/src/hotspot/share/memory/metaspace.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/metaspace.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -172,16 +172,13 @@ assert(!_frozen, "sanity"); } #ifdef _LP64 - static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base); + static void allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base); #endif private: #ifdef _LP64 - static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base); - - // Returns true if can use CDS with metaspace allocated as specified address. 
- static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base); + static void set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base); static void initialize_class_space(ReservedSpace rs); #endif diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/metaspaceClosure.hpp --- a/src/hotspot/share/memory/metaspaceClosure.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/metaspaceClosure.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -75,6 +75,10 @@ _default }; + enum SpecialRef { + _method_entry_ref + }; + // class MetaspaceClosure::Ref -- // // MetaspaceClosure can be viewed as a very simple type of copying garbage @@ -278,6 +282,16 @@ template void push(T** mpp, Writability w = _default) { push_impl(new ObjectRef(mpp, w)); } + + template void push_method_entry(T** mpp, intptr_t* p) { + push_special(_method_entry_ref, new ObjectRef(mpp, _default), (intptr_t*)p); + } + + // This is for tagging special pointers that are not a reference to MetaspaceObj. It's currently + // used to mark the method entry points in Method/ConstMethod. + virtual void push_special(SpecialRef type, Ref* obj, intptr_t* p) { + assert(type == _method_entry_ref, "only special type allowed for now"); + } }; // This is a special MetaspaceClosure that visits each unique MetaspaceObj once. diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/metaspaceShared.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -41,6 +41,8 @@ #include "interpreter/bytecodes.hpp" #include "logging/log.hpp" #include "logging/logMessage.hpp" +#include "memory/archiveUtils.inline.hpp" +#include "memory/dynamicArchive.hpp" #include "memory/filemap.hpp" #include "memory/heapShared.inline.hpp" #include "memory/metaspace.hpp" @@ -48,7 +50,6 @@ #include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "memory/dynamicArchive.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/instanceClassLoaderKlass.hpp" #include "oops/instanceMirrorKlass.hpp" @@ -67,7 +68,7 @@ #include "runtime/vmThread.hpp" #include "runtime/vmOperations.hpp" #include "utilities/align.hpp" -#include "utilities/bitMap.hpp" +#include "utilities/bitMap.inline.hpp" #include "utilities/defaultStream.hpp" #include "utilities/hashtable.inline.hpp" #if INCLUDE_G1GC @@ -82,8 +83,8 @@ bool MetaspaceShared::_remapped_readwrite = false; address MetaspaceShared::_i2i_entry_code_buffers = NULL; size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0; -size_t MetaspaceShared::_core_spaces_size = 0; void* MetaspaceShared::_shared_metaspace_static_top = NULL; +intx MetaspaceShared::_relocation_delta; // The CDS archive is divided into the following regions: // mc - misc code (the method entry trampolines) @@ -147,9 +148,21 @@ return p; } +void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) { + assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment"); + intptr_t *p = (intptr_t*)_top; + char* newtop = _top + sizeof(intptr_t); + expand_top_to(newtop); + *p = n; + if (need_to_mark) { + ArchivePtrMarker::mark_pointer(p); + } +} + void DumpRegion::print(size_t total_bytes) const { tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT, - _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base)); + _name, used(), 
percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), + p2i(_base + MetaspaceShared::final_delta())); } void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) { @@ -172,14 +185,14 @@ } } -DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"); -size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0; +static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"); +static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0; void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) { // Start with 0 committed bytes. The memory will be committed as needed by // MetaspaceShared::commit_shared_space_to(). if (!_shared_vs.initialize(_shared_rs, 0)) { - vm_exit_during_initialization("Unable to allocate memory for shared space"); + fatal("Unable to allocate memory for shared space"); } first_space->init(&_shared_rs, (char*)first_space_bottom); } @@ -209,73 +222,32 @@ return _ro_region.allocate(num_bytes); } -void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { - assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); - - // If using shared space, open the file that contains the shared space - // and map in the memory before initializing the rest of metaspace (so - // the addresses don't conflict) - FileMapInfo* mapinfo = new FileMapInfo(true); - - // Open the shared archive file, read and validate the header. If - // initialization fails, shared spaces [UseSharedSpaces] are - // disabled and the file is closed. - // Map in spaces now also - if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) { - size_t cds_total = core_spaces_size(); - address cds_address = (address)mapinfo->region_addr(0); - char* cds_end = (char *)align_up(cds_address + cds_total, - Metaspace::reserve_alignment()); - - // Mapping the dynamic archive before allocating the class space - cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end); - -#ifdef _LP64 - if (Metaspace::using_class_space()) { - // If UseCompressedClassPointers is set then allocate the metaspace area - // above the heap and above the CDS area (if it exists). - Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); - // map_heap_regions() compares the current narrow oop and klass encodings - // with the archived ones, so it must be done after all encodings are determined. - mapinfo->map_heap_regions(); - } - CompressedKlassPointers::set_range(CompressedClassSpaceSize); -#endif // _LP64 +// When reserving an address range using ReservedSpace, we need an alignment that satisfies both: +// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions, +// while keeping the first range at offset 0 of this range. +// Metaspace::reserve_alignment() -- so we can pass the region to +// Metaspace::allocate_metaspace_compressed_klass_ptrs. 
+size_t MetaspaceShared::reserved_space_alignment() { + size_t os_align = os::vm_allocation_granularity(); + size_t ms_align = Metaspace::reserve_alignment(); + if (os_align >= ms_align) { + assert(os_align % ms_align == 0, "must be a multiple"); + return os_align; } else { - assert(!mapinfo->is_open() && !UseSharedSpaces, - "archive file not closed or shared spaces not disabled."); + assert(ms_align % os_align == 0, "must be a multiple"); + return ms_align; } } -char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces( - char* static_start, char* static_end) { - assert(UseSharedSpaces, "must be runtime"); - char* cds_end = static_end; - if (!DynamicDumpSharedSpaces) { - address dynamic_top = DynamicArchive::map(); - if (dynamic_top != NULL) { - assert(dynamic_top > (address)static_start, "Unexpected layout"); - MetaspaceObj::expand_shared_metaspace_range(dynamic_top); - cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment()); - } - } - return cds_end; -} - -ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment, - bool large, char* requested_address) { - if (requested_address != NULL) { - _shared_rs = ReservedSpace(size, alignment, large, requested_address); - } else { - _shared_rs = ReservedSpace(size, alignment, large); - } - return &_shared_rs; +ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) { + bool large_pages = false; // Don't use large pages for the CDS archive. + assert(is_aligned(requested_address, reserved_space_alignment()), "must be"); + return ReservedSpace(size, reserved_space_alignment(), large_pages, requested_address); } void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() { assert(DumpSharedSpaces, "should be called for dump time only"); - const size_t reserve_alignment = Metaspace::reserve_alignment(); - bool large_pages = false; // No large pages when dumping the CDS archive. + const size_t reserve_alignment = reserved_space_alignment(); char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment); #ifdef _LP64 @@ -296,15 +268,22 @@ size_t cds_total = align_down(256*M, reserve_alignment); #endif + bool use_requested_base = true; + if (ArchiveRelocationMode == 1) { + log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address"); + use_requested_base = false; + } + // First try to reserve the space at the specified SharedBaseAddress. - //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base); - reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base); + assert(!_shared_rs.is_reserved(), "must be"); + if (use_requested_base) { + _shared_rs = reserve_shared_space(cds_total, shared_base); + } if (_shared_rs.is_reserved()) { assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match"); } else { // Get a mmap region anywhere if the SharedBaseAddress fails. 
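reserved_space_alignment() above simply picks the coarser of the two granularities, relying on the assertion that one is a multiple of the other. A minimal self-contained sketch of that rule plus the align_up step applied to the mapping end offset; the 4K/64K values are illustrative examples, not what any particular platform reports.

  #include <cassert>
  #include <cstddef>
  #include <cstdio>

  static size_t coarser_alignment(size_t os_align, size_t ms_align) {
    if (os_align >= ms_align) {
      assert(os_align % ms_align == 0 && "must be a multiple");
      return os_align;
    }
    assert(ms_align % os_align == 0 && "must be a multiple");
    return ms_align;
  }

  static size_t align_up(size_t value, size_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);   // alignment must be a power of two
  }

  int main() {
    size_t a = coarser_alignment(4 * 1024, 64 * 1024);   // e.g. 4K allocation granularity, 64K metaspace alignment
    std::printf("alignment = %zu, end offset 100000 aligns up to %zu\n", a, align_up(100000, a));
    return 0;
  }
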
- //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages); - reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL); + _shared_rs = reserve_shared_space(cds_total); } if (!_shared_rs.is_reserved()) { vm_exit_during_initialization("Unable to reserve memory for shared space", @@ -442,6 +421,8 @@ assert(commit <= uncommitted, "sanity"); bool result = _shared_vs.expand_by(commit, false); + ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high()); + if (!result) { vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes", need_committed_size)); @@ -451,6 +432,10 @@ commit, _shared_vs.actual_committed_size(), _shared_vs.high()); } +void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) { + ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high()); +} + // Read/write a data stream for restoring/preserving metadata pointers and // miscellaneous data from/to the shared archive file. @@ -469,6 +454,7 @@ soc->do_tag(sizeof(Symbol)); // Dump/restore miscellaneous metadata. + JavaClasses::serialize_offsets(soc); Universe::serialize(soc); soc->do_tag(--tag); @@ -482,7 +468,6 @@ HeapShared::serialize_subgraph_info_table_header(soc); SystemDictionaryShared::serialize_dictionary_headers(soc); - JavaClasses::serialize_offsets(soc); InstanceMirrorKlass::serialize_offsets(soc); soc->do_tag(--tag); @@ -705,7 +690,9 @@ // Switch the vtable pointer to point to the cloned vtable. static void patch(Metadata* obj) { assert(DumpSharedSpaces, "dump-time only"); + assert(MetaspaceShared::is_in_output_space(obj), "must be"); *(void**)obj = (void*)(_info->cloned_vtable()); + ArchivePtrMarker::mark_pointer(obj); } static bool is_valid_shared_object(const T* obj) { @@ -799,7 +786,8 @@ } #define ALLOC_CPP_VTABLE_CLONE(c) \ - _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner::allocate(#c); + _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner::allocate(#c); \ + ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]); #define CLONE_CPP_VTABLE(c) \ p = CppVtableCloner::clone_vtable(#c, (CppVtableInfo*)p); @@ -965,7 +953,7 @@ assert(size % sizeof(intptr_t) == 0, "bad size"); do_tag((int)size); while (size > 0) { - _dump_region->append_intptr_t(*(intptr_t*)start); + _dump_region->append_intptr_t(*(intptr_t*)start, true); start += sizeof(intptr_t); size -= sizeof(intptr_t); } @@ -1129,9 +1117,13 @@ GrowableArray* oopmaps); void dump_symbols(); char* dump_read_only_tables(); + void print_class_stats(); void print_region_stats(); + void print_bitmap_region_stats(size_t size, size_t total_size); void print_heap_region_stats(GrowableArray *heap_mem, - const char *name, const size_t total_size); + const char *name, size_t total_size); + void relocate_to_default_base_address(CHeapBitMap* ptrmap); + public: VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; } @@ -1276,6 +1268,15 @@ ref->metaspace_pointers_do_at(&refer, new_loc); return true; // recurse into ref.obj() } + virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) { + assert(type == _method_entry_ref, "only special type allowed for now"); + address obj = ref->obj(); + address new_obj = get_new_loc(ref); + size_t offset = pointer_delta(p, obj, sizeof(u1)); + intptr_t* new_p = (intptr_t*)(new_obj + offset); + assert(*p == *new_p, "must be a copy"); + ArchivePtrMarker::mark_pointer((address*)new_p); + } }; // Relocate a reference to point to its shallow copy @@ -1284,6 +1285,7 @@ virtual bool do_ref(Ref* ref, bool read_only) { if 
(ref->not_null()) { ref->update(get_new_loc(ref)); + ArchivePtrMarker::mark_pointer(ref->addr()); } return false; // Do not recurse. } @@ -1440,7 +1442,71 @@ return start; } +void VM_PopulateDumpSharedSpace::print_class_stats() { + tty->print_cr("Number of classes %d", _global_klass_objects->length()); + { + int num_type_array = 0, num_obj_array = 0, num_inst = 0; + for (int i = 0; i < _global_klass_objects->length(); i++) { + Klass* k = _global_klass_objects->at(i); + if (k->is_instance_klass()) { + num_inst ++; + } else if (k->is_objArray_klass()) { + num_obj_array ++; + } else { + assert(k->is_typeArray_klass(), "sanity"); + num_type_array ++; + } + } + tty->print_cr(" instance classes = %5d", num_inst); + tty->print_cr(" obj array classes = %5d", num_obj_array); + tty->print_cr(" type array classes = %5d", num_type_array); + } +} + +void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) { + intx addr_delta = MetaspaceShared::final_delta(); + if (addr_delta == 0) { + ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_md_region.top()); + } else { + // We are not able to reserve space at Arguments::default_SharedBaseAddress() (due to ASLR). + // This means that the current content of the archive is based on a random + // address. Let's relocate all the pointers, so that it can be mapped to + // Arguments::default_SharedBaseAddress() without runtime relocation. + // + // Note: both the base and dynamic archive are written with + // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress() + + // Patch all pointers that are marked by ptrmap within this region, + // where we have just dumped all the metaspace data. + address patch_base = (address)SharedBaseAddress; + address patch_end = (address)_md_region.top(); + size_t size = patch_end - patch_base; + + // the current value of the pointers to be patched must be within this + // range (i.e., must point to valid metaspace objects) + address valid_old_base = patch_base; + address valid_old_end = patch_end; + + // after patching, the pointers must point inside this range + // (the requested location of the archive, as mapped at runtime). 
+ address valid_new_base = (address)Arguments::default_SharedBaseAddress(); + address valid_new_end = valid_new_base + size; + + log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to " + "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end), + p2i(valid_new_base), p2i(valid_new_end)); + + SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end, + valid_new_base, valid_new_end, addr_delta, ptrmap); + ptrmap->iterate(&patcher); + ArchivePtrMarker::compact(patcher.max_non_null_offset()); + } +} + void VM_PopulateDumpSharedSpace::doit() { + CHeapBitMap ptrmap; + MetaspaceShared::initialize_ptr_marker(&ptrmap); + // We should no longer allocate anything from the metaspace, so that: // // (1) Metaspace::allocate might trigger GC if we have run out of @@ -1472,24 +1538,7 @@ CollectClassesClosure collect_classes; ClassLoaderDataGraph::loaded_classes_do(&collect_classes); - tty->print_cr("Number of classes %d", _global_klass_objects->length()); - { - int num_type_array = 0, num_obj_array = 0, num_inst = 0; - for (int i = 0; i < _global_klass_objects->length(); i++) { - Klass* k = _global_klass_objects->at(i); - if (k->is_instance_klass()) { - num_inst ++; - } else if (k->is_objArray_klass()) { - num_obj_array ++; - } else { - assert(k->is_typeArray_klass(), "sanity"); - num_type_array ++; - } - } - tty->print_cr(" instance classes = %5d", num_inst); - tty->print_cr(" obj array classes = %5d", num_obj_array); - tty->print_cr(" type array classes = %5d", num_type_array); - } + print_class_stats(); // Ensure the ConstMethods won't be modified at run-time tty->print("Updating ConstMethods ... "); @@ -1520,12 +1569,6 @@ MetaspaceShared::allocate_cpp_vtable_clones(); _md_region.pack(); - // The 4 core spaces are allocated consecutively mc->rw->ro->md, so there total size - // is just the spaces between the two ends. - size_t core_spaces_size = _md_region.end() - _mc_region.base(); - assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()), - "should already be aligned"); - // During patching, some virtual methods may be called, so at this point // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate). MetaspaceShared::patch_cpp_vtable_pointers(); @@ -1534,6 +1577,10 @@ // We don't want to write these addresses into the archive. MetaspaceShared::zero_cpp_vtable_clones_for_writing(); + // relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress() + // without runtime relocation. + relocate_to_default_base_address(&ptrmap); + // Create and write the archive file that maps the shared spaces. 
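The relocation pass only visits the slots that were marked in the pointer bitmap while dumping, and adds the same constant delta to each of them. The sketch below models that idea with plain integers and a std::vector<bool> standing in for the CHeapBitMap; the addresses are invented for illustration and a 64-bit build is assumed.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    // Pretend this is the dumped archive: some slots hold pointers into the
    // archive itself, others hold plain data that must not be touched.
    const uintptr_t dump_base      = 0x7f0000000000ull;   // where the data happened to be at dump time (example)
    const uintptr_t requested_base = 0x000800000000ull;   // where it should be mappable at run time (example)
    const intptr_t  delta          = (intptr_t)(requested_base - dump_base);

    std::vector<uintptr_t> archive = { dump_base + 0x40, 12345, dump_base + 0x80, 0 };
    std::vector<bool>      is_ptr  = { true,             false, true,             true };  // bitmap built at dump time

    for (size_t i = 0; i < archive.size(); i++) {
      if (is_ptr[i] && archive[i] != 0) {   // only marked, non-null slots are patched
        archive[i] += delta;
      }
    }

    for (uintptr_t v : archive) {
      std::printf("0x%llx\n", (unsigned long long)v);
    }
    return 0;
  }
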
FileMapInfo* mapinfo = new FileMapInfo(true); @@ -1542,7 +1589,6 @@ mapinfo->set_misc_data_patching_start(vtbl_list); mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(), MetaspaceShared::i2i_entry_code_buffers_size()); - mapinfo->set_core_spaces_size(core_spaces_size); mapinfo->open_for_write(); // NOTE: md contains the trampoline code for method entries, which are patched at run time, @@ -1552,6 +1598,8 @@ write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false); + mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap()); + _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( _closed_archive_heap_regions, _closed_archive_heap_oopmaps, @@ -1563,6 +1611,7 @@ MetaspaceShared::first_open_archive_heap_region, MetaspaceShared::max_open_archive_heap_region); + mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress()); mapinfo->set_header_crc(mapinfo->compute_header_crc()); mapinfo->write_header(); mapinfo->close(); @@ -1594,12 +1643,16 @@ void VM_PopulateDumpSharedSpace::print_region_stats() { // Print statistics of all the regions + const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes(); + const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment()); const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() + _mc_region.reserved() + _md_region.reserved() + + bitmap_reserved + _total_closed_archive_region_size + _total_open_archive_region_size; const size_t total_bytes = _ro_region.used() + _rw_region.used() + _mc_region.used() + _md_region.used() + + bitmap_used + _total_closed_archive_region_size + _total_open_archive_region_size; const double total_u_perc = percent_of(total_bytes, total_reserved); @@ -1608,6 +1661,7 @@ _rw_region.print(total_reserved); _ro_region.print(total_reserved); _md_region.print(total_reserved); + print_bitmap_region_stats(bitmap_reserved, total_reserved); print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved); print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved); @@ -1615,8 +1669,13 @@ total_bytes, total_reserved, total_u_perc); } +void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) { + tty->print_cr("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT, + size, size/double(total_size)*100.0, size, p2i(NULL)); +} + void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray *heap_mem, - const char *name, const size_t total_size) { + const char *name, size_t total_size) { int arr_len = heap_mem == NULL ? 
0 : heap_mem->length(); for (int i = 0; i < arr_len; i++) { char* start = (char*)heap_mem->at(i).start(); @@ -1636,9 +1695,13 @@ o->set_klass(k); } -Klass* MetaspaceShared::get_relocated_klass(Klass *k) { +Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) { assert(DumpSharedSpaces, "sanity"); - return ArchiveCompactor::get_relocated_klass(k); + k = ArchiveCompactor::get_relocated_klass(k); + if (is_final) { + k = (Klass*)(address(k) + final_delta()); + } + return k; } class LinkSharedClassesClosure : public KlassClosure { @@ -1947,8 +2010,9 @@ } } -void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) { - _shared_metaspace_static_top = top; +void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) { + assert(base <= static_top && static_top <= top, "must be"); + _shared_metaspace_static_top = static_top; MetaspaceObj::set_shared_metaspace_range(base, top); } @@ -1973,49 +2037,312 @@ } } -// Map shared spaces at requested addresses and return if succeeded. -bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) { - size_t image_alignment = mapinfo->alignment(); +void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { + assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); + MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; + FileMapInfo* static_mapinfo = open_static_archive(); + FileMapInfo* dynamic_mapinfo = NULL; + + if (static_mapinfo != NULL) { + dynamic_mapinfo = open_dynamic_archive(); + + // First try to map at the requested address + result = map_archives(static_mapinfo, dynamic_mapinfo, true); + if (result == MAP_ARCHIVE_MMAP_FAILURE) { + // Mapping has failed (probably due to ASLR). Let's map at an address chosen + // by the OS. + result = map_archives(static_mapinfo, dynamic_mapinfo, false); + } + } + + if (result == MAP_ARCHIVE_SUCCESS) { + bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped()); + char* cds_base = static_mapinfo->mapped_base(); + char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end(); + set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end); + _relocation_delta = static_mapinfo->relocation_delta(); + if (dynamic_mapped) { + FileMapInfo::set_shared_path_table(dynamic_mapinfo); + } else { + FileMapInfo::set_shared_path_table(static_mapinfo); + } + } else { + set_shared_metaspace_range(NULL, NULL, NULL); + UseSharedSpaces = false; + FileMapInfo::fail_continue("Unable to map shared spaces"); + if (PrintSharedArchiveAndExit) { + vm_exit_during_initialization("Unable to use shared archive."); + } + } + + if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) { + delete static_mapinfo; + } + if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) { + delete dynamic_mapinfo; + } +} + +FileMapInfo* MetaspaceShared::open_static_archive() { + FileMapInfo* mapinfo = new FileMapInfo(true); + if (!mapinfo->initialize()) { + delete(mapinfo); + return NULL; + } + return mapinfo; +} + +FileMapInfo* MetaspaceShared::open_dynamic_archive() { + if (DynamicDumpSharedSpaces) { + return NULL; + } + if (Arguments::GetSharedDynamicArchivePath() == NULL) { + return NULL; + } -#ifndef _WINDOWS - // Map in the shared memory and then map the regions on top of it. - // On Windows, don't map the memory here because it will cause the - // mappings of the regions to fail. 
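At run time exactly two placements are attempted: the base address recorded in the archive, and, only if that mmap attempt fails (typically because ASLR already took the range), an address chosen by the OS. A compact model of that retry policy; map_at() is a stand-in, not a HotSpot function, and the enum only mirrors the shape of MapArchiveResult.

  #include <cstdio>

  enum MapResult { MAP_SUCCESS, MAP_MMAP_FAILURE, MAP_OTHER_FAILURE };

  // Stand-in for the real mapping step; 'use_requested_addr' selects the placement policy.
  static MapResult map_at(bool use_requested_addr) {
    // Pretend the requested range is already occupied, so the first attempt fails.
    return use_requested_addr ? MAP_MMAP_FAILURE : MAP_SUCCESS;
  }

  int main() {
    MapResult result = map_at(/*use_requested_addr=*/true);
    if (result == MAP_MMAP_FAILURE) {
      // Only an mmap-level failure is worth retrying; a corrupt or mismatched
      // archive would fail again no matter where it is mapped.
      result = map_at(/*use_requested_addr=*/false);
    }
    std::printf("%s\n", result == MAP_SUCCESS ? "mapped (pointer relocation may be needed)"
                                              : "falling back to running without the shared archive");
    return 0;
  }
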
- ReservedSpace shared_rs = mapinfo->reserve_shared_memory(); - if (!shared_rs.is_reserved()) return false; -#endif + FileMapInfo* mapinfo = new FileMapInfo(false); + if (!mapinfo->initialize()) { + delete(mapinfo); + return NULL; + } + return mapinfo; +} + +// use_requested_addr: +// true = map at FileMapHeader::_requested_base_address +// false = map at an alternative address picked by OS. +MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo, + bool use_requested_addr) { + PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) { + // For product build only -- this is for benchmarking the cost of doing relocation. + // For debug builds, the check is done in FileMapInfo::map_regions for better test coverage. + log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address"); + return MAP_ARCHIVE_MMAP_FAILURE; + }); + + if (ArchiveRelocationMode == 2 && !use_requested_addr) { + log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address"); + return MAP_ARCHIVE_MMAP_FAILURE; + }; + + if (dynamic_mapinfo != NULL) { + // Ensure that the OS won't be able to allocate new memory spaces between the two + // archives, or else it would mess up the simple comparision in MetaspaceObj::is_shared(). + assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap"); + } - assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); + ReservedSpace main_rs, archive_space_rs, class_space_rs; + MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE; + char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo, + use_requested_addr, main_rs, archive_space_rs, + class_space_rs); + if (mapped_base_address == NULL) { + result = MAP_ARCHIVE_MMAP_FAILURE; + } else { + log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes", + p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size()); + log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes", + p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size()); + MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs); + MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ? + map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE; - // Map each shared region - int regions[] = {mc, rw, ro, md}; - size_t len = sizeof(regions)/sizeof(int); - char* saved_base[] = {NULL, NULL, NULL, NULL}; - char* top = mapinfo->map_regions(regions, saved_base, len ); + if (static_result == MAP_ARCHIVE_SUCCESS) { + if (dynamic_result == MAP_ARCHIVE_SUCCESS) { + result = MAP_ARCHIVE_SUCCESS; + } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) { + assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed"); + // No need to retry mapping the dynamic archive again, as it will never succeed + // (bad file, etc) -- just keep the base archive. + log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s", + dynamic_mapinfo->full_path()); + result = MAP_ARCHIVE_SUCCESS; + // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no + // easy API to do that right now. 
+ } else { + result = MAP_ARCHIVE_MMAP_FAILURE; + } + } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) { + result = MAP_ARCHIVE_OTHER_FAILURE; + } else { + result = MAP_ARCHIVE_MMAP_FAILURE; + } + } - if (top != NULL && - (image_alignment == (size_t)os::vm_allocation_granularity()) && - mapinfo->validate_shared_path_table()) { - // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for - // fast checking in MetaspaceShared::is_in_shared_metaspace() and - // MetaspaceObj::is_shared(). - _core_spaces_size = mapinfo->core_spaces_size(); - set_shared_metaspace_range((void*)saved_base[0], (void*)top); - return true; + if (result == MAP_ARCHIVE_SUCCESS) { + if (!main_rs.is_reserved() && class_space_rs.is_reserved()) { + MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass); + } + SharedBaseAddress = (size_t)mapped_base_address; + LP64_ONLY({ + if (Metaspace::using_class_space()) { + assert(class_space_rs.is_reserved(), "must be"); + char* cds_base = static_mapinfo->mapped_base(); + Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base); + // map_heap_regions() compares the current narrow oop and klass encodings + // with the archived ones, so it must be done after all encodings are determined. + static_mapinfo->map_heap_regions(); + } + CompressedKlassPointers::set_range(CompressedClassSpaceSize); + }); + } else { + unmap_archive(static_mapinfo); + unmap_archive(dynamic_mapinfo); + release_reserved_spaces(main_rs, archive_space_rs, class_space_rs); + } + + return result; +} + +char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo, + FileMapInfo* dynamic_mapinfo, + bool use_requested_addr, + ReservedSpace& main_rs, + ReservedSpace& archive_space_rs, + ReservedSpace& class_space_rs) { + const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space()); + const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size()); + + if (use_klass_space) { + assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated"); + } + if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) { + return NULL; + } + + // Size and requested location of the archive_space_rs (for both static and dynamic archives) + size_t base_offset = static_mapinfo->mapping_base_offset(); + size_t end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset(); + assert(base_offset == 0, "must be"); + assert(is_aligned(end_offset, os::vm_allocation_granularity()), "must be"); + assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be"); + + // In case reserved_space_alignment() != os::vm_allocation_granularity() + assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be"); + end_offset = align_up(end_offset, reserved_space_alignment()); + + size_t archive_space_size = end_offset - base_offset; + + // Special handling for Windows because it cannot mmap into a reserved space: + // use_requested_addr: We just map each region individually, and give up if any one of them fails. + // !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap). + // We're going to patch all the pointers anyway so there's no benefit for mmap. 
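When both the archive and the compressed class space are needed, the code below keeps them contiguous: either the class space is reserved directly after the archive range, or one large range is reserved and then split (first_part/last_part). A sketch of just the offset arithmetic for the split case; the base address and sizes are made-up example values and no real reservation is performed.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uintptr_t mapped_base_address = 0x800000000ull;    // hypothetical base chosen by the OS (64-bit assumed)
    const size_t    archive_space_size  = 16 * 1024 * 1024;  // example: combined static + dynamic archive size
    const size_t    class_space_size    = 128 * 1024 * 1024; // example: compressed class space size

    // first_part / last_part of one contiguous reservation, expressed as plain offsets:
    uintptr_t archive_space_base = mapped_base_address;                       // archive at offset 0
    uintptr_t class_space_base   = mapped_base_address + archive_space_size;  // class space immediately after

    std::printf("archive     [0x%llx - 0x%llx)\n",
                (unsigned long long)archive_space_base,
                (unsigned long long)(archive_space_base + archive_space_size));
    std::printf("class space [0x%llx - 0x%llx)\n",
                (unsigned long long)class_space_base,
                (unsigned long long)(class_space_base + class_space_size));
    return 0;
  }
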
+ + if (use_requested_addr) { + char* archive_space_base = static_mapinfo->requested_base_address() + base_offset; + char* archive_space_end = archive_space_base + archive_space_size; + if (!MetaspaceShared::use_windows_memory_mapping()) { + archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base); + if (!archive_space_rs.is_reserved()) { + return NULL; + } + } + if (use_klass_space) { + // Make sure we can map the klass space immediately following the archive_space space + char* class_space_base = archive_space_end; + class_space_rs = reserve_shared_space(class_space_size, class_space_base); + if (!class_space_rs.is_reserved()) { + return NULL; + } + } + return static_mapinfo->requested_base_address(); } else { - mapinfo->unmap_regions(regions, saved_base, len); -#ifndef _WINDOWS - // Release the entire mapped region - shared_rs.release(); -#endif - // If -Xshare:on is specified, print out the error message and exit VM, - // otherwise, set UseSharedSpaces to false and continue. - if (RequireSharedSpaces || PrintSharedArchiveAndExit) { - vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on."); + if (use_klass_space) { + main_rs = reserve_shared_space(archive_space_size + class_space_size); + if (main_rs.is_reserved()) { + archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true); + class_space_rs = main_rs.last_part(archive_space_size); + } + } else { + main_rs = reserve_shared_space(archive_space_size); + archive_space_rs = main_rs; + } + if (archive_space_rs.is_reserved()) { + return archive_space_rs.base(); } else { - FLAG_SET_DEFAULT(UseSharedSpaces, false); + return NULL; + } + } +} + +void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs, + ReservedSpace& archive_space_rs, + ReservedSpace& class_space_rs) { + if (main_rs.is_reserved()) { + assert(main_rs.contains(archive_space_rs.base()), "must be"); + assert(main_rs.contains(class_space_rs.base()), "must be"); + log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base())); + main_rs.release(); + } else { + if (archive_space_rs.is_reserved()) { + log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base())); + archive_space_rs.release(); + } + if (class_space_rs.is_reserved()) { + log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base())); + class_space_rs.release(); } - return false; + } +} + +static int static_regions[] = {MetaspaceShared::mc, + MetaspaceShared::rw, + MetaspaceShared::ro, + MetaspaceShared::md}; +static int dynamic_regions[] = {MetaspaceShared::rw, + MetaspaceShared::ro, + MetaspaceShared::mc}; +static int static_regions_count = 4; +static int dynamic_regions_count = 3; + +MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) { + assert(UseSharedSpaces, "must be runtime"); + if (mapinfo == NULL) { + return MAP_ARCHIVE_SUCCESS; // no error has happeed -- trivially succeeded. + } + + mapinfo->set_is_mapped(false); + + if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) { + log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT + " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity()); + return MAP_ARCHIVE_OTHER_FAILURE; + } + + MapArchiveResult result = mapinfo->is_static() ? 
+ mapinfo->map_regions(static_regions, static_regions_count, mapped_base_address, rs) : + mapinfo->map_regions(dynamic_regions, dynamic_regions_count, mapped_base_address, rs); + + if (result != MAP_ARCHIVE_SUCCESS) { + unmap_archive(mapinfo); + return result; + } + + if (mapinfo->is_static()) { + if (!mapinfo->validate_shared_path_table()) { + unmap_archive(mapinfo); + return MAP_ARCHIVE_OTHER_FAILURE; + } + } else { + if (!DynamicArchive::validate(mapinfo)) { + unmap_archive(mapinfo); + return MAP_ARCHIVE_OTHER_FAILURE; + } + } + + mapinfo->set_is_mapped(true); + return MAP_ARCHIVE_SUCCESS; +} + +void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) { + assert(UseSharedSpaces, "must be runtime"); + if (mapinfo != NULL) { + if (mapinfo->is_static()) { + mapinfo->unmap_regions(static_regions, static_regions_count); + } else { + mapinfo->unmap_regions(dynamic_regions, dynamic_regions_count); + } + mapinfo->set_is_mapped(false); } } @@ -2023,17 +2350,15 @@ // serialize it out to its various destinations. void MetaspaceShared::initialize_shared_spaces() { - FileMapInfo *mapinfo = FileMapInfo::current_info(); - _i2i_entry_code_buffers = mapinfo->i2i_entry_code_buffers(); - _i2i_entry_code_buffers_size = mapinfo->i2i_entry_code_buffers_size(); - // _core_spaces_size is loaded from the shared archive immediatelly after mapping - assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity"); - char* buffer = mapinfo->misc_data_patching_start(); + FileMapInfo *static_mapinfo = FileMapInfo::current_info(); + _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers(); + _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size(); + char* buffer = static_mapinfo->misc_data_patching_start(); clone_cpp_vtables((intptr_t*)buffer); // Verify various attributes of the archive, plus initialize the // shared string/symbol tables - buffer = mapinfo->serialized_data_start(); + buffer = static_mapinfo->serialized_data_start(); intptr_t* array = (intptr_t*)buffer; ReadClosure rc(&array); serialize(&rc); @@ -2041,17 +2366,26 @@ // Initialize the run-time symbol table. SymbolTable::create_table(); - mapinfo->patch_archived_heap_embedded_pointers(); + static_mapinfo->patch_archived_heap_embedded_pointers(); // Close the mapinfo file - mapinfo->close(); + static_mapinfo->close(); + + FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info(); + if (dynamic_mapinfo != NULL) { + intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data_start(); + ReadClosure rc(&buffer); + SymbolTable::serialize_shared_table_header(&rc, false); + SystemDictionaryShared::serialize_dictionary_headers(&rc, false); + dynamic_mapinfo->close(); + } if (PrintSharedArchiveAndExit) { if (PrintSharedDictionary) { tty->print_cr("\nShared classes:\n"); SystemDictionaryShared::print_on(tty); } - if (_archive_loading_failed) { + if (FileMapInfo::current_info() == NULL || _archive_loading_failed) { tty->print_cr("archive is invalid"); vm_exit(1); } else { @@ -2094,3 +2428,10 @@ vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), "Please reduce the number of shared classes."); } + +// This is used to relocate the pointers so that the archive can be mapped at +// Arguments::default_SharedBaseAddress() without runtime relocation. +intx MetaspaceShared::final_delta() { + return intx(Arguments::default_SharedBaseAddress()) // We want the archive to be mapped to here at runtime + - intx(SharedBaseAddress); // .. 
but the archive is mapped at here at dump time +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/metaspaceShared.hpp --- a/src/hotspot/share/memory/metaspaceShared.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/metaspaceShared.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -37,6 +37,13 @@ #define MAX_SHARED_DELTA (0x7FFFFFFF) class FileMapInfo; +class CHeapBitMap; + +enum MapArchiveResult { + MAP_ARCHIVE_SUCCESS, + MAP_ARCHIVE_MMAP_FAILURE, + MAP_ARCHIVE_OTHER_FAILURE +}; class MetaspaceSharedStats { public: @@ -62,13 +69,7 @@ char* expand_top_to(char* newtop); char* allocate(size_t num_bytes, size_t alignment=BytesPerWord); - void append_intptr_t(intptr_t n) { - assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment"); - intptr_t *p = (intptr_t*)_top; - char* newtop = _top + sizeof(intptr_t); - expand_top_to(newtop); - *p = n; - } + void append_intptr_t(intptr_t n, bool need_to_mark = false); char* base() const { return _base; } char* top() const { return _top; } @@ -117,17 +118,15 @@ } void do_ptr(void** p) { - _dump_region->append_intptr_t((intptr_t)*p); + _dump_region->append_intptr_t((intptr_t)*p, true); } void do_u4(u4* p) { - void* ptr = (void*)(uintx(*p)); - do_ptr(&ptr); + _dump_region->append_intptr_t((intptr_t)(*p)); } void do_bool(bool *p) { - void* ptr = (void*)(uintx(*p)); - do_ptr(&ptr); + _dump_region->append_intptr_t((intptr_t)(*p)); } void do_tag(int tag) { @@ -170,7 +169,7 @@ bool reading() const { return true; } }; -#endif +#endif // INCLUDE_CDS // Class Data Sharing Support class MetaspaceShared : AllStatic { @@ -187,6 +186,7 @@ static size_t _i2i_entry_code_buffers_size; static size_t _core_spaces_size; static void* _shared_metaspace_static_top; + static intx _relocation_delta; public: enum { // core archive spaces @@ -194,11 +194,12 @@ rw = 1, // read-write shared space in the heap ro = 2, // read-only shared space in the heap md = 3, // miscellaneous data for initializing tables, etc. 
- num_core_spaces = 4, // number of non-string regions - num_non_heap_spaces = 4, + bm = 4, // relocation bitmaps (freed after file mapping is finished) + num_core_region = 4, + num_non_heap_spaces = 5, // mapped java heap regions - first_closed_archive_heap_region = md + 1, + first_closed_archive_heap_region = bm + 1, max_closed_archive_heap_region = 2, last_closed_archive_heap_region = first_closed_archive_heap_region + max_closed_archive_heap_region - 1, first_open_archive_heap_region = last_closed_archive_heap_region + 1, @@ -220,16 +221,14 @@ CDS_ONLY(return &_shared_rs); NOT_CDS(return NULL); } + + static void set_shared_rs(ReservedSpace rs) { + CDS_ONLY(_shared_rs = rs); + } + static void commit_shared_space_to(char* newtop) NOT_CDS_RETURN; - static size_t core_spaces_size() { - assert(DumpSharedSpaces || UseSharedSpaces, "sanity"); - assert(_core_spaces_size != 0, "sanity"); - return _core_spaces_size; - } static void initialize_dumptime_shared_and_meta_spaces() NOT_CDS_RETURN; static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN; - static char* initialize_dynamic_runtime_shared_spaces( - char* static_start, char* static_end) NOT_CDS_RETURN_(NULL); static void post_initialize(TRAPS) NOT_CDS_RETURN; // Delta of this object from SharedBaseAddress @@ -245,22 +244,25 @@ static void set_archive_loading_failed() { _archive_loading_failed = true; } + static bool is_in_output_space(void* ptr) { + assert(DumpSharedSpaces, "must be"); + return shared_rs()->contains(ptr); + } + static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false); static void initialize_shared_spaces() NOT_CDS_RETURN; // Return true if given address is in the shared metaspace regions (i.e., excluding any // mapped shared heap regions.) static bool is_in_shared_metaspace(const void* p) { - // If no shared metaspace regions are mapped, MetaspceObj::_shared_metaspace_{base,top} will - // both be NULL and all values of p will be rejected quickly. - return (p < MetaspaceObj::shared_metaspace_top() && p >= MetaspaceObj::shared_metaspace_base()); + return MetaspaceObj::is_shared((const MetaspaceObj*)p); } static address shared_metaspace_top() { return (address)MetaspaceObj::shared_metaspace_top(); } - static void set_shared_metaspace_range(void* base, void* top) NOT_CDS_RETURN; + static void set_shared_metaspace_range(void* base, void *static_top, void* top) NOT_CDS_RETURN; // Return true if given address is in the shared region corresponding to the idx static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false); @@ -298,8 +300,8 @@ static void link_and_cleanup_shared_classes(TRAPS); #if INCLUDE_CDS - static ReservedSpace* reserve_shared_rs(size_t size, size_t alignment, - bool large, char* requested_address); + static ReservedSpace reserve_shared_space(size_t size, char* requested_address = NULL); + static size_t reserved_space_alignment(); static void init_shared_dump_space(DumpRegion* first_space, address first_space_bottom = NULL); static DumpRegion* misc_code_dump_space(); static DumpRegion* read_write_dump_space(); @@ -342,11 +344,35 @@ } static void relocate_klass_ptr(oop o); - static Klass* get_relocated_klass(Klass *k); + static Klass* get_relocated_klass(Klass *k, bool is_final=false); static intptr_t* fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj); + static void initialize_ptr_marker(CHeapBitMap* ptrmap); + // Non-zero if the archive(s) need to be mapped a non-default location due to ASLR. 
+ static intx relocation_delta() { return _relocation_delta; } + static intx final_delta(); + static bool use_windows_memory_mapping() { + const bool is_windows = (NOT_WINDOWS(false) WINDOWS_ONLY(true)); + //const bool is_windows = true; // enable this to allow testing the windows mmap semantics on Linux, etc. + return is_windows; + } private: static void read_extra_data(const char* filename, TRAPS) NOT_CDS_RETURN; + static FileMapInfo* open_static_archive(); + static FileMapInfo* open_dynamic_archive(); + static MapArchiveResult map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo, + bool use_requested_addr); + static char* reserve_address_space_for_archives(FileMapInfo* static_mapinfo, + FileMapInfo* dynamic_mapinfo, + bool use_requested_addr, + ReservedSpace& main_rs, + ReservedSpace& archive_space_rs, + ReservedSpace& class_space_rs); + static void release_reserved_spaces(ReservedSpace& main_rs, + ReservedSpace& archive_space_rs, + ReservedSpace& class_space_rs); + static MapArchiveResult map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs); + static void unmap_archive(FileMapInfo* mapinfo); }; #endif // SHARE_MEMORY_METASPACESHARED_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/memory/universe.cpp --- a/src/hotspot/share/memory/universe.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/memory/universe.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -85,18 +85,24 @@ #include "utilities/ostream.hpp" #include "utilities/preserveException.hpp" +#define PRIMITIVE_MIRRORS_DO(func) \ + func(_int_mirror) \ + func(_float_mirror) \ + func(_double_mirror) \ + func(_byte_mirror) \ + func(_bool_mirror) \ + func(_char_mirror) \ + func(_long_mirror) \ + func(_short_mirror) \ + func(_void_mirror) + +#define DEFINE_PRIMITIVE_MIRROR(m) \ + oop Universe::m = NULL; + // Known objects +PRIMITIVE_MIRRORS_DO(DEFINE_PRIMITIVE_MIRROR) Klass* Universe::_typeArrayKlassObjs[T_LONG+1] = { NULL /*, NULL...*/ }; Klass* Universe::_objectArrayKlassObj = NULL; -oop Universe::_int_mirror = NULL; -oop Universe::_float_mirror = NULL; -oop Universe::_double_mirror = NULL; -oop Universe::_byte_mirror = NULL; -oop Universe::_bool_mirror = NULL; -oop Universe::_char_mirror = NULL; -oop Universe::_long_mirror = NULL; -oop Universe::_short_mirror = NULL; -oop Universe::_void_mirror = NULL; oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ }; oop Universe::_main_thread_group = NULL; oop Universe::_system_thread_group = NULL; @@ -167,17 +173,11 @@ } } -void Universe::oops_do(OopClosure* f) { +#define DO_PRIMITIVE_MIRROR(m) \ + f->do_oop((oop*) &m); - f->do_oop((oop*) &_int_mirror); - f->do_oop((oop*) &_float_mirror); - f->do_oop((oop*) &_double_mirror); - f->do_oop((oop*) &_byte_mirror); - f->do_oop((oop*) &_bool_mirror); - f->do_oop((oop*) &_char_mirror); - f->do_oop((oop*) &_long_mirror); - f->do_oop((oop*) &_short_mirror); - f->do_oop((oop*) &_void_mirror); +void Universe::oops_do(OopClosure* f) { + PRIMITIVE_MIRRORS_DO(DO_PRIMITIVE_MIRROR); for (int i = T_BOOLEAN; i < T_VOID+1; i++) { f->do_oop((oop*) &_mirrors[i]); @@ -231,6 +231,13 @@ _do_stack_walk_cache->metaspace_pointers_do(it); } +#define ASSERT_MIRROR_NULL(m) \ + assert(m == NULL, "archived mirrors should be NULL"); + +#define SERIALIZE_MIRROR(m) \ + f->do_oop(&m); \ + if (m != NULL) { java_lang_Class::update_archived_primitive_mirror_native_pointers(m); } + // Serialize metadata and pointers to primitive type mirrors in and out of CDS archive void Universe::serialize(SerializeClosure* f) { @@ -239,25 
+246,12 @@ } f->do_ptr((void**)&_objectArrayKlassObj); + #if INCLUDE_CDS_JAVA_HEAP -#ifdef ASSERT - if (DumpSharedSpaces && !HeapShared::is_heap_object_archiving_allowed()) { - assert(_int_mirror == NULL && _float_mirror == NULL && - _double_mirror == NULL && _byte_mirror == NULL && - _bool_mirror == NULL && _char_mirror == NULL && - _long_mirror == NULL && _short_mirror == NULL && - _void_mirror == NULL, "mirrors should be NULL"); - } -#endif - f->do_oop(&_int_mirror); - f->do_oop(&_float_mirror); - f->do_oop(&_double_mirror); - f->do_oop(&_byte_mirror); - f->do_oop(&_bool_mirror); - f->do_oop(&_char_mirror); - f->do_oop(&_long_mirror); - f->do_oop(&_short_mirror); - f->do_oop(&_void_mirror); + DEBUG_ONLY(if (DumpSharedSpaces && !HeapShared::is_heap_object_archiving_allowed()) { + PRIMITIVE_MIRRORS_DO(ASSERT_MIRROR_NULL); + }); + PRIMITIVE_MIRRORS_DO(SERIALIZE_MIRROR); #endif f->do_ptr((void**)&_the_array_interfaces_array); @@ -419,18 +413,18 @@ #endif } +#define ASSERT_MIRROR_NOT_NULL(m) \ + assert(m != NULL, "archived mirrors should not be NULL"); + void Universe::initialize_basic_type_mirrors(TRAPS) { #if INCLUDE_CDS_JAVA_HEAP if (UseSharedSpaces && HeapShared::open_archive_heap_region_mapped() && _int_mirror != NULL) { assert(HeapShared::is_heap_object_archiving_allowed(), "Sanity"); - assert(_float_mirror != NULL && _double_mirror != NULL && - _byte_mirror != NULL && _byte_mirror != NULL && - _bool_mirror != NULL && _char_mirror != NULL && - _long_mirror != NULL && _short_mirror != NULL && - _void_mirror != NULL, "Sanity"); + PRIMITIVE_MIRRORS_DO(ASSERT_MIRROR_NOT_NULL); } else + // _int_mirror could be NULL if archived heap is not mapped. #endif { _int_mirror = diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/oops/constMethod.cpp --- a/src/hotspot/share/oops/constMethod.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/oops/constMethod.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -420,6 +420,8 @@ if (has_default_annotations()) { it->push(default_annotations_addr()); } + ConstMethod* this_ptr = this; + it->push_method_entry(&this_ptr, (intptr_t*)&_adapter_trampoline); } // Printing diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/oops/instanceKlass.cpp --- a/src/hotspot/share/oops/instanceKlass.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/oops/instanceKlass.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1577,11 +1577,30 @@ } #endif -static int binary_search(const Array* methods, const Symbol* name) { +bool InstanceKlass::_disable_method_binary_search = false; + +int InstanceKlass::quick_search(const Array* methods, const Symbol* name) { int len = methods->length(); - // methods are sorted, so do binary search int l = 0; int h = len - 1; + + if (_disable_method_binary_search) { + // At the final stage of dynamic dumping, the methods array may not be sorted + // by ascending addresses of their names, so we can't use binary search anymore. + // However, methods with the same name are still laid out consecutively inside the + // methods array, so let's look for the first one that matches. 
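The same fallback can be shown on a plain array: once the dynamic dump has re-sorted the methods by some other criterion, only a left-to-right scan for the first matching entry is still correct, while the sorted case keeps the usual binary search. A standalone sketch, using integers in place of Symbol* name addresses; the function name merely echoes quick_search and is not the HotSpot implementation.

  #include <cstdio>
  #include <vector>

  // Returns an index of an element equal to 'key', or -1.
  // 'binary_search_ok' plays the role of !_disable_method_binary_search.
  static int quick_search(const std::vector<int>& v, int key, bool binary_search_ok) {
    int l = 0, h = (int)v.size() - 1;
    if (!binary_search_ok) {
      // Equal keys are still laid out consecutively, so the first hit of a
      // linear scan is the start of that run.
      for (int i = 0; i <= h; i++) {
        if (v[i] == key) return i;
      }
      return -1;
    }
    while (l <= h) {                    // classic binary search on a sorted array
      int mid = (l + h) >> 1;
      if (v[mid] == key) return mid;
      if (v[mid] < key) l = mid + 1; else h = mid - 1;
    }
    return -1;
  }

  int main() {
    std::vector<int> sorted   = {3, 5, 5, 9, 12};
    std::vector<int> resorted = {9, 3, 5, 5, 12};   // no longer sorted by key
    std::printf("%d %d\n", quick_search(sorted, 5, true), quick_search(resorted, 5, false));
    return 0;
  }
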
+ assert(DynamicDumpSharedSpaces, "must be"); + while (l <= h) { + Method* m = methods->at(l); + if (m->name() == name) { + return l; + } + l ++; + } + return -1; + } + + // methods are sorted by ascending addresses of their names, so do binary search while (l <= h) { int mid = (l + h) >> 1; Method* m = methods->at(mid); @@ -1733,7 +1752,7 @@ const bool skipping_overpass = (overpass_mode == skip_overpass); const bool skipping_static = (static_mode == skip_static); const bool skipping_private = (private_mode == skip_private); - const int hit = binary_search(methods, name); + const int hit = quick_search(methods, name); if (hit != -1) { const Method* const m = methods->at(hit); @@ -1784,7 +1803,7 @@ const Symbol* name, int* end_ptr) { assert(end_ptr != NULL, "just checking"); - int start = binary_search(methods, name); + int start = quick_search(methods, name); int end = start + 1; if (start != -1) { while (start - 1 >= 0 && (methods->at(start - 1))->name() == name) --start; @@ -2365,6 +2384,7 @@ _breakpoints = NULL; _previous_versions = NULL; _cached_class_file = NULL; + _jvmti_cached_class_field_map = NULL; #endif _init_thread = NULL; @@ -2373,6 +2393,8 @@ _oop_map_cache = NULL; // clear _nest_host to ensure re-load at runtime _nest_host = NULL; + _package_entry = NULL; + _dep_context_last_cleaned = 0; } void InstanceKlass::remove_java_mirror() { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/oops/instanceKlass.hpp --- a/src/hotspot/share/oops/instanceKlass.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/oops/instanceKlass.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -329,6 +329,8 @@ friend class SystemDictionary; + static bool _disable_method_binary_search; + public: u2 loader_type() { return _misc_flags & loader_type_bits(); @@ -564,6 +566,14 @@ bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const; bool find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const; + private: + static int quick_search(const Array* methods, const Symbol* name); + + public: + static void disable_method_binary_search() { + _disable_method_binary_search = true; + } + // find a local method (returns NULL if not found) Method* find_method(const Symbol* name, const Symbol* signature) const; static Method* find_method(const Array* methods, diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/oops/method.cpp --- a/src/hotspot/share/oops/method.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/oops/method.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -332,18 +332,21 @@ return align_metadata_size(header_size() + extra_words); } - Symbol* Method::klass_name() const { return method_holder()->name(); } - void Method::metaspace_pointers_do(MetaspaceClosure* it) { log_trace(cds)("Iter(Method): %p", this); it->push(&_constMethod); it->push(&_method_data); it->push(&_method_counters); + + Method* this_ptr = this; + it->push_method_entry(&this_ptr, (intptr_t*)&_i2i_entry); + it->push_method_entry(&this_ptr, (intptr_t*)&_from_compiled_entry); + it->push_method_entry(&this_ptr, (intptr_t*)&_from_interpreted_entry); } // Attempt to return method oop to original state. 
Clear any pointers @@ -1741,12 +1744,15 @@ // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array // default_methods also uses this without the ordering for fast find_method -void Method::sort_methods(Array* methods, bool set_idnums) { +void Method::sort_methods(Array* methods, bool set_idnums, method_comparator_func func) { int length = methods->length(); if (length > 1) { + if (func == NULL) { + func = method_comparator; + } { NoSafepointVerifier nsv; - QuickSort::sort(methods->data(), length, method_comparator, /*idempotent=*/false); + QuickSort::sort(methods->data(), length, func, /*idempotent=*/false); } // Reset method ordering if (set_idnums) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/oops/method.hpp --- a/src/hotspot/share/oops/method.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/oops/method.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -1006,8 +1006,10 @@ void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)" #endif + typedef int (*method_comparator_func)(Method* a, Method* b); + // Helper routine used for method sorting - static void sort_methods(Array* methods, bool set_idnums = true); + static void sort_methods(Array* methods, bool set_idnums = true, method_comparator_func func = NULL); // Deallocation function for redefine classes or if an error occurs void deallocate_contents(ClassLoaderData* loader_data); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/chaitin.cpp --- a/src/hotspot/share/opto/chaitin.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/chaitin.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1169,7 +1169,7 @@ lrgs(lo)._next = _simplified; _simplified = lo; // If this guy is "at risk" then mark his current neighbors - if( lrgs(lo)._at_risk ) { + if (lrgs(lo)._at_risk && !_ifg->neighbors(lo)->is_empty()) { IndexSetIterator elements(_ifg->neighbors(lo)); uint datum; while ((datum = elements.next()) != 0) { @@ -1178,7 +1178,10 @@ } // Yank this guy from the IFG. - IndexSet *adj = _ifg->remove_node( lo ); + IndexSet *adj = _ifg->remove_node(lo); + if (adj->is_empty()) { + continue; + } // If any neighbors' degrees fall below their number of // allowed registers, then put that neighbor on the low degree @@ -1202,8 +1205,11 @@ // Pull from hi-degree list uint prev = n->_prev; uint next = n->_next; - if (prev) lrgs(prev)._next = next; - else _hi_degree = next; + if (prev) { + lrgs(prev)._next = next; + } else { + _hi_degree = next; + } lrgs(next)._prev = prev; n->_next = _lo_degree; _lo_degree = neighbor; @@ -1314,7 +1320,7 @@ // Check for "at_risk" LRG's uint risk_lrg = _lrg_map.find(lrg._risk_bias); - if( risk_lrg != 0 ) { + if (risk_lrg != 0 && !_ifg->neighbors(risk_lrg)->is_empty()) { // Walk the colored neighbors of the "at_risk" candidate // Choose a color which is both legal and already taken by a neighbor // of the "at_risk" candidate in order to improve the chances of the @@ -1330,7 +1336,7 @@ } uint copy_lrg = _lrg_map.find(lrg._copy_bias); - if( copy_lrg != 0 ) { + if (copy_lrg != 0) { // If he has a color, if(!_ifg->_yanked->test(copy_lrg)) { OptoReg::Name reg = lrgs(copy_lrg).reg(); @@ -1432,41 +1438,43 @@ // Remove neighbor colors IndexSet *s = _ifg->neighbors(lidx); + debug_only(RegMask orig_mask = lrg->mask();) - debug_only(RegMask orig_mask = lrg->mask();) - IndexSetIterator elements(s); - uint neighbor; - while ((neighbor = elements.next()) != 0) { - // Note that neighbor might be a spill_reg. 
In this case, exclusion - // of its color will be a no-op, since the spill_reg chunk is in outer - // space. Also, if neighbor is in a different chunk, this exclusion - // will be a no-op. (Later on, if lrg runs out of possible colors in - // its chunk, a new chunk of color may be tried, in which case - // examination of neighbors is started again, at retry_next_chunk.) - LRG &nlrg = lrgs(neighbor); - OptoReg::Name nreg = nlrg.reg(); - // Only subtract masks in the same chunk - if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) { + if (!s->is_empty()) { + IndexSetIterator elements(s); + uint neighbor; + while ((neighbor = elements.next()) != 0) { + // Note that neighbor might be a spill_reg. In this case, exclusion + // of its color will be a no-op, since the spill_reg chunk is in outer + // space. Also, if neighbor is in a different chunk, this exclusion + // will be a no-op. (Later on, if lrg runs out of possible colors in + // its chunk, a new chunk of color may be tried, in which case + // examination of neighbors is started again, at retry_next_chunk.) + LRG &nlrg = lrgs(neighbor); + OptoReg::Name nreg = nlrg.reg(); + // Only subtract masks in the same chunk + if (nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE) { #ifndef PRODUCT - uint size = lrg->mask().Size(); - RegMask rm = lrg->mask(); + uint size = lrg->mask().Size(); + RegMask rm = lrg->mask(); #endif - lrg->SUBTRACT(nlrg.mask()); + lrg->SUBTRACT(nlrg.mask()); #ifndef PRODUCT - if (trace_spilling() && lrg->mask().Size() != size) { - ttyLocker ttyl; - tty->print("L%d ", lidx); - rm.dump(); - tty->print(" intersected L%d ", neighbor); - nlrg.mask().dump(); - tty->print(" removed "); - rm.SUBTRACT(lrg->mask()); - rm.dump(); - tty->print(" leaving "); - lrg->mask().dump(); - tty->cr(); + if (trace_spilling() && lrg->mask().Size() != size) { + ttyLocker ttyl; + tty->print("L%d ", lidx); + rm.dump(); + tty->print(" intersected L%d ", neighbor); + nlrg.mask().dump(); + tty->print(" removed "); + rm.SUBTRACT(lrg->mask()); + rm.dump(); + tty->print(" leaving "); + lrg->mask().dump(); + tty->cr(); + } +#endif } -#endif } } //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness"); @@ -1827,7 +1835,7 @@ // Found a safepoint? JVMState *jvms = n->jvms(); - if( jvms ) { + if (jvms && !liveout.is_empty()) { // Now scan for a live derived pointer IndexSetIterator elements(&liveout); uint neighbor; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/coalesce.cpp --- a/src/hotspot/share/opto/coalesce.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/coalesce.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -602,29 +602,42 @@ // Some original neighbors of lr1 might have gone away // because the constrained register mask prevented them. // Remove lr1 from such neighbors. - IndexSetIterator one(n_lr1); - uint neighbor; + uint neighbor = 0; LRG &lrg1 = lrgs(lr1); - while ((neighbor = one.next()) != 0) - if( !_ulr.member(neighbor) ) - if( _phc._ifg->neighbors(neighbor)->remove(lr1) ) - lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) ); + if (!n_lr1->is_empty()) { + IndexSetIterator one(n_lr1); + while ((neighbor = one.next()) != 0) { + if (!_ulr.member(neighbor)) { + if (_phc._ifg->neighbors(neighbor)->remove(lr1)) { + lrgs(neighbor).inc_degree(-lrg1.compute_degree(lrgs(neighbor))); + } + } + } + } // lr2 is now called (coalesced into) lr1. // Remove lr2 from the IFG. 
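
The coalesce.cpp and chaitin.cpp hunks in this area all apply the same transformation: every place that builds an IndexSetIterator is now wrapped in an is_empty() check, matching the slimmer iterator constructors introduced later in the indexSet.hpp hunk. A minimal sketch of that caller-side guard with a toy set type (the names here are illustrative, not HotSpot's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToySet {
      std::vector<uint32_t> elems;            // nonzero members only; 0 means "done"
      bool is_empty() const { return elems.empty(); }
    };

    struct ToySetIterator {
      const ToySet& s;
      size_t pos;
      explicit ToySetIterator(const ToySet& set) : s(set), pos(0) {}
      uint32_t next() { return pos < s.elems.size() ? s.elems[pos++] : 0; }
    };

    static uint32_t sum_members(const ToySet& s) {
      uint32_t total = 0;
      if (!s.is_empty()) {                    // guard before constructing the iterator,
        ToySetIterator it(s);                 // the pattern added throughout this patch
        uint32_t v;
        while ((v = it.next()) != 0) {        // 0 is the "done" sentinel, as in IndexSetIterator
          total += v;
        }
      }
      return total;
    }

Hoisting the emptiness test into the caller keeps the common empty-liveout case on a single cheap branch instead of constructing an iterator that would immediately report it is done.
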
- IndexSetIterator two(n_lr2); LRG &lrg2 = lrgs(lr2); - while ((neighbor = two.next()) != 0) - if( _phc._ifg->neighbors(neighbor)->remove(lr2) ) - lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) ); + if (!n_lr2->is_empty()) { + IndexSetIterator two(n_lr2); + while ((neighbor = two.next()) != 0) { + if (_phc._ifg->neighbors(neighbor)->remove(lr2)) { + lrgs(neighbor).inc_degree(-lrg2.compute_degree(lrgs(neighbor))); + } + } + } // Some neighbors of intermediate copies now interfere with the // combined live range. - IndexSetIterator three(&_ulr); - while ((neighbor = three.next()) != 0) - if( _phc._ifg->neighbors(neighbor)->insert(lr1) ) - lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) ); + if (!_ulr.is_empty()) { + IndexSetIterator three(&_ulr); + while ((neighbor = three.next()) != 0) { + if (_phc._ifg->neighbors(neighbor)->insert(lr1)) { + lrgs(neighbor).inc_degree(lrg1.compute_degree(lrgs(neighbor))); + } + } + } } static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/ifg.cpp --- a/src/hotspot/share/opto/ifg.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/ifg.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -80,11 +80,13 @@ assert( !_is_square, "only on triangular" ); // Simple transpose - for( uint i = 0; i < _maxlrg; i++ ) { - IndexSetIterator elements(&_adjs[i]); - uint datum; - while ((datum = elements.next()) != 0) { - _adjs[datum].insert( i ); + for(uint i = 0; i < _maxlrg; i++ ) { + if (!_adjs[i].is_empty()) { + IndexSetIterator elements(&_adjs[i]); + uint datum; + while ((datum = elements.next()) != 0) { + _adjs[datum].insert(i); + } } } _is_square = true; @@ -108,16 +110,18 @@ } // Union edges of B into A -void PhaseIFG::Union( uint a, uint b ) { +void PhaseIFG::Union(uint a, uint b) { assert( _is_square, "only on square" ); IndexSet *A = &_adjs[a]; - IndexSetIterator b_elements(&_adjs[b]); - uint datum; - while ((datum = b_elements.next()) != 0) { - if(A->insert(datum)) { - _adjs[datum].insert(a); - lrgs(a).invalid_degree(); - lrgs(datum).invalid_degree(); + if (!_adjs[b].is_empty()) { + IndexSetIterator b_elements(&_adjs[b]); + uint datum; + while ((datum = b_elements.next()) != 0) { + if (A->insert(datum)) { + _adjs[datum].insert(a); + lrgs(a).invalid_degree(); + lrgs(datum).invalid_degree(); + } } } } @@ -130,12 +134,15 @@ _yanked->set(a); // I remove the LRG from all neighbors. - IndexSetIterator elements(&_adjs[a]); LRG &lrg_a = lrgs(a); - uint datum; - while ((datum = elements.next()) != 0) { - _adjs[datum].remove(a); - lrgs(datum).inc_degree( -lrg_a.compute_degree(lrgs(datum)) ); + + if (!_adjs[a].is_empty()) { + IndexSetIterator elements(&_adjs[a]); + uint datum; + while ((datum = elements.next()) != 0) { + _adjs[datum].remove(a); + lrgs(datum).inc_degree(-lrg_a.compute_degree(lrgs(datum))); + } } return neighbors(a); } @@ -146,6 +153,8 @@ assert( _yanked->test(a), "" ); _yanked->remove(a); + if (_adjs[a].is_empty()) return; + IndexSetIterator elements(&_adjs[a]); uint datum; while ((datum = elements.next()) != 0) { @@ -159,7 +168,7 @@ // mis-aligned (or for Fat-Projections, not-adjacent) then we have to // MULTIPLY the sizes. Inspect Brigg's thesis on register pairs to see why // this is so. -int LRG::compute_degree( LRG &l ) const { +int LRG::compute_degree(LRG &l) const { int tmp; int num_regs = _num_regs; int nregs = l.num_regs(); @@ -174,14 +183,15 @@ // mis-aligned (or for Fat-Projections, not-adjacent) then we have to // MULTIPLY the sizes. 
Inspect Brigg's thesis on register pairs to see why // this is so. -int PhaseIFG::effective_degree( uint lidx ) const { +int PhaseIFG::effective_degree(uint lidx) const { + IndexSet *s = neighbors(lidx); + if (s->is_empty()) return 0; int eff = 0; int num_regs = lrgs(lidx).num_regs(); int fat_proj = lrgs(lidx)._fat_proj; - IndexSet *s = neighbors(lidx); IndexSetIterator elements(s); uint nidx; - while((nidx = elements.next()) != 0) { + while ((nidx = elements.next()) != 0) { LRG &lrgn = lrgs(nidx); int nregs = lrgn.num_regs(); eff += (fat_proj || lrgn._fat_proj) // either is a fat-proj? @@ -196,14 +206,16 @@ void PhaseIFG::dump() const { tty->print_cr("-- Interference Graph --%s--", _is_square ? "square" : "triangular" ); - if( _is_square ) { - for( uint i = 0; i < _maxlrg; i++ ) { + if (_is_square) { + for (uint i = 0; i < _maxlrg; i++) { tty->print(_yanked->test(i) ? "XX " : " "); tty->print("L%d: { ",i); - IndexSetIterator elements(&_adjs[i]); - uint datum; - while ((datum = elements.next()) != 0) { - tty->print("L%d ", datum); + if (!_adjs[i].is_empty()) { + IndexSetIterator elements(&_adjs[i]); + uint datum; + while ((datum = elements.next()) != 0) { + tty->print("L%d ", datum); + } } tty->print_cr("}"); @@ -221,10 +233,12 @@ tty->print("L%d ",j - 1); } tty->print("| "); - IndexSetIterator elements(&_adjs[i]); - uint datum; - while ((datum = elements.next()) != 0) { - tty->print("L%d ", datum); + if (!_adjs[i].is_empty()) { + IndexSetIterator elements(&_adjs[i]); + uint datum; + while ((datum = elements.next()) != 0) { + tty->print("L%d ", datum); + } } tty->print("}\n"); } @@ -251,16 +265,18 @@ for( uint i = 0; i < _maxlrg; i++ ) { assert(!_yanked->test(i) || !neighbor_cnt(i), "Is removed completely" ); IndexSet *set = &_adjs[i]; - IndexSetIterator elements(set); - uint idx; - uint last = 0; - while ((idx = elements.next()) != 0) { - assert(idx != i, "Must have empty diagonal"); - assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find"); - assert(_adjs[idx].member(i), "IFG not square"); - assert(!_yanked->test(idx), "No yanked neighbors"); - assert(last < idx, "not sorted increasing"); - last = idx; + if (!set->is_empty()) { + IndexSetIterator elements(set); + uint idx; + uint last = 0; + while ((idx = elements.next()) != 0) { + assert(idx != i, "Must have empty diagonal"); + assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find"); + assert(_adjs[idx].member(i), "IFG not square"); + assert(!_yanked->test(idx), "No yanked neighbors"); + assert(last < idx, "not sorted increasing"); + last = idx; + } } assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong"); } @@ -273,16 +289,18 @@ * Only interfere if acceptable register masks overlap. 
*/ void PhaseChaitin::interfere_with_live(uint lid, IndexSet* liveout) { - LRG& lrg = lrgs(lid); - const RegMask& rm = lrg.mask(); - IndexSetIterator elements(liveout); - uint interfering_lid = elements.next(); - while (interfering_lid != 0) { - LRG& interfering_lrg = lrgs(interfering_lid); - if (rm.overlap(interfering_lrg.mask())) { - _ifg->add_edge(lid, interfering_lid); + if (!liveout->is_empty()) { + LRG& lrg = lrgs(lid); + const RegMask &rm = lrg.mask(); + IndexSetIterator elements(liveout); + uint interfering_lid = elements.next(); + while (interfering_lid != 0) { + LRG& interfering_lrg = lrgs(interfering_lid); + if (rm.overlap(interfering_lrg.mask())) { + _ifg->add_edge(lid, interfering_lid); + } + interfering_lid = elements.next(); } - interfering_lid = elements.next(); } } @@ -381,6 +399,9 @@ #ifdef ASSERT uint PhaseChaitin::count_int_pressure(IndexSet* liveout) { + if (liveout->is_empty()) { + return 0; + } IndexSetIterator elements(liveout); uint lidx = elements.next(); uint cnt = 0; @@ -397,6 +418,9 @@ } uint PhaseChaitin::count_float_pressure(IndexSet* liveout) { + if (liveout->is_empty()) { + return 0; + } IndexSetIterator elements(liveout); uint lidx = elements.next(); uint cnt = 0; @@ -494,13 +518,15 @@ * the block from the area. */ void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost) { - IndexSetIterator elements(liveout); - uint lid = elements.next(); - while (lid != 0) { - LRG& lrg = lrgs(lid); - lrg._area += cost; - raise_pressure(b, lrg, int_pressure, float_pressure); - lid = elements.next(); + if (!liveout->is_empty()) { + IndexSetIterator elements(liveout); + uint lid = elements.next(); + while (lid != 0) { + LRG &lrg = lrgs(lid); + lrg._area += cost; + raise_pressure(b, lrg, int_pressure, float_pressure); + lid = elements.next(); + } } assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect"); assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect"); @@ -512,13 +538,15 @@ * and int/pointer registers. */ void PhaseChaitin::compute_entry_block_pressure(Block* b) { - IndexSet* livein = _live->livein(b); - IndexSetIterator elements(livein); - uint lid = elements.next(); - while (lid != 0) { - LRG& lrg = lrgs(lid); - raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure); - lid = elements.next(); + IndexSet *livein = _live->livein(b); + if (!livein->is_empty()) { + IndexSetIterator elements(livein); + uint lid = elements.next(); + while (lid != 0) { + LRG &lrg = lrgs(lid); + raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure); + lid = elements.next(); + } } // Now check phis for locally defined inputs for (uint j = 0; j < b->number_of_nodes(); j++) { @@ -546,15 +574,18 @@ * and int/pointer registers. 
*/ void PhaseChaitin::compute_exit_block_pressure(Block* b) { + IndexSet* livein = _live->live(b); - IndexSetIterator elements(livein); _sched_int_pressure.set_current_pressure(0); _sched_float_pressure.set_current_pressure(0); - uint lid = elements.next(); - while (lid != 0) { - LRG& lrg = lrgs(lid); - raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure); - lid = elements.next(); + if (!livein->is_empty()) { + IndexSetIterator elements(livein); + uint lid = elements.next(); + while (lid != 0) { + LRG &lrg = lrgs(lid); + raise_pressure(b, lrg, _sched_int_pressure, _sched_float_pressure); + lid = elements.next(); + } } } @@ -654,6 +685,7 @@ * all conflicting parties and avoid the interference. */ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg, IndexSet* liveout, uint& must_spill) { + if (liveout->is_empty()) return; // Check for common case const RegMask& rm = lrg.mask(); int r_size = lrg.num_regs(); @@ -833,7 +865,7 @@ Node* n = block->get_node(location); uint lid = _lrg_map.live_range_id(n); - if(lid) { + if (lid) { LRG& lrg = lrgs(lid); // A DEF normally costs block frequency; rematerialized values are diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/indexSet.cpp --- a/src/hotspot/share/opto/indexSet.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/indexSet.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -111,6 +111,9 @@ IndexSet::BitBlock *IndexSet::alloc_block_containing(uint element) { BitBlock *block = alloc_block(); uint bi = get_block_index(element); + if (bi >= _current_block_limit) { + _current_block_limit = bi + 1; + } _blocks[bi] = block; return block; } @@ -125,7 +128,7 @@ assert(block != &_empty_block, "cannot free the empty block"); block->set_next((IndexSet::BitBlock*)Compile::current()->indexSet_free_block_list()); Compile::current()->set_indexSet_free_block_list(block); - set_block(i,&_empty_block); + set_block(i, &_empty_block); } //------------------------------lrg_union-------------------------------------- @@ -168,38 +171,43 @@ // other color. (A variant of the Briggs assertion) uint reg_degree = 0; - uint element; + uint element = 0; // Load up the combined interference set with the neighbors of one - IndexSetIterator elements(one); - while ((element = elements.next()) != 0) { - LRG &lrg = ifg->lrgs(element); - if (mask.overlap(lrg.mask())) { - insert(element); - if( !lrg.mask().is_AllStack() ) { - reg_degree += lrg1.compute_degree(lrg); - if( reg_degree >= fail_degree ) return reg_degree; - } else { - // !!!!! Danger! No update to reg_degree despite having a neighbor. - // A variant of the Briggs assertion. - // Not needed if I simplify during coalesce, ala George/Appel. - assert( lrg.lo_degree(), "" ); + if (!one->is_empty()) { + IndexSetIterator elements(one); + while ((element = elements.next()) != 0) { + LRG &lrg = ifg->lrgs(element); + if (mask.overlap(lrg.mask())) { + insert(element); + if (!lrg.mask().is_AllStack()) { + reg_degree += lrg1.compute_degree(lrg); + if (reg_degree >= fail_degree) return reg_degree; + } else { + // !!!!! Danger! No update to reg_degree despite having a neighbor. + // A variant of the Briggs assertion. + // Not needed if I simplify during coalesce, ala George/Appel. 
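
The indexSet.cpp change above, in alloc_block_containing, is where the new _current_block_limit high-water mark gets raised; the clear() and swap() changes that follow then only visit blocks below that limit rather than all _max_blocks of them. A standalone sketch of the same idea on a simple blocked bitset (illustrative code, not the HotSpot class):

    #include <cstdint>
    #include <vector>

    class BlockedBitset {
      static const uint32_t bits_per_block = 64;
      std::vector<uint64_t> _blocks;
      uint32_t _limit;                     // high-water mark, like _current_block_limit
     public:
      explicit BlockedBitset(uint32_t max_elements)
        : _blocks((max_elements + bits_per_block - 1) / bits_per_block, 0),
          _limit(0) {}

      void insert(uint32_t e) {            // assumes e < max_elements
        uint32_t bi = e / bits_per_block;
        if (bi >= _limit) {
          _limit = bi + 1;                 // raise the mark; it never shrinks on insert
        }
        _blocks[bi] |= uint64_t(1) << (e % bits_per_block);
      }

      void clear() {
        for (uint32_t i = 0; i < _limit; i++) {   // only the blocks that were ever touched
          _blocks[i] = 0;
        }
        _limit = 0;
      }
    };

For sparse sets over a large universe, the common case for live ranges, most blocks are never populated, so bounding clear(), swap() and iteration by the high-water mark avoids scanning a long tail of untouched blocks.
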
+ assert(lrg.lo_degree(), ""); + } } } } // Add neighbors of two as well - IndexSetIterator elements2(two); - while ((element = elements2.next()) != 0) { - LRG &lrg = ifg->lrgs(element); - if (mask.overlap(lrg.mask())) { - if (insert(element)) { - if( !lrg.mask().is_AllStack() ) { - reg_degree += lrg2.compute_degree(lrg); - if( reg_degree >= fail_degree ) return reg_degree; - } else { - // !!!!! Danger! No update to reg_degree despite having a neighbor. - // A variant of the Briggs assertion. - // Not needed if I simplify during coalesce, ala George/Appel. - assert( lrg.lo_degree(), "" ); + + if (!two->is_empty()) { + IndexSetIterator elements2(two); + while ((element = elements2.next()) != 0) { + LRG &lrg = ifg->lrgs(element); + if (mask.overlap(lrg.mask())) { + if (insert(element)) { + if (!lrg.mask().is_AllStack()) { + reg_degree += lrg2.compute_degree(lrg); + if (reg_degree >= fail_degree) return reg_degree; + } else { + // !!!!! Danger! No update to reg_degree despite having a neighbor. + // A variant of the Briggs assertion. + // Not needed if I simplify during coalesce, ala George/Appel. + assert(lrg.lo_degree(), ""); + } } } } @@ -219,6 +227,7 @@ _max_elements = set->_max_elements; #endif _count = set->_count; + _current_block_limit = set->_current_block_limit; _max_blocks = set->_max_blocks; if (_max_blocks <= preallocated_block_list_size) { _blocks = _preallocated_block_list; @@ -248,6 +257,7 @@ _max_elements = max_elements; #endif _count = 0; + _current_block_limit = 0; _max_blocks = (max_elements + bits_per_block - 1) / bits_per_block; if (_max_blocks <= preallocated_block_list_size) { @@ -272,6 +282,7 @@ _max_elements = max_elements; #endif // ASSERT _count = 0; + _current_block_limit = 0; _max_blocks = (max_elements + bits_per_block - 1) / bits_per_block; if (_max_blocks <= preallocated_block_list_size) { @@ -294,7 +305,8 @@ set->check_watch("swap", _serial_number); #endif - for (uint i = 0; i < _max_blocks; i++) { + uint max = MAX2(_current_block_limit, set->_current_block_limit); + for (uint i = 0; i < max; i++) { BitBlock *temp = _blocks[i]; set_block(i, set->_blocks[i]); set->set_block(i, temp); @@ -302,6 +314,11 @@ uint temp = _count; _count = set->_count; set->_count = temp; + + temp = _current_block_limit; + _current_block_limit = set->_current_block_limit; + set->_current_block_limit = temp; + } //---------------------------- IndexSet::dump() ----------------------------- @@ -383,78 +400,6 @@ // Create an iterator for a set. If empty blocks are detected when iterating // over the set, these blocks are replaced. -IndexSetIterator::IndexSetIterator(IndexSet *set) { -#ifdef ASSERT - if (CollectIndexSetStatistics) { - set->tally_iteration_statistics(); - } - set->check_watch("traversed", set->count()); -#endif - if (set->is_empty()) { - _current = 0; - _next_word = IndexSet::words_per_block; - _next_block = 1; - _max_blocks = 1; - - // We don't need the following values when we iterate over an empty set. - // The commented out code is left here to document that the omission - // is intentional. - // - //_value = 0; - //_words = NULL; - //_blocks = NULL; - //_set = NULL; - } else { - _current = 0; - _value = 0; - _next_block = 0; - _next_word = IndexSet::words_per_block; - - _max_blocks = set->_max_blocks; - _words = NULL; - _blocks = set->_blocks; - _set = set; - } -} - -//---------------------------- IndexSetIterator(const) ----------------------------- -// Iterate over a constant IndexSet. 
- -IndexSetIterator::IndexSetIterator(const IndexSet *set) { -#ifdef ASSERT - if (CollectIndexSetStatistics) { - set->tally_iteration_statistics(); - } - // We don't call check_watch from here to avoid bad recursion. - // set->check_watch("traversed const", set->count()); -#endif - if (set->is_empty()) { - _current = 0; - _next_word = IndexSet::words_per_block; - _next_block = 1; - _max_blocks = 1; - - // We don't need the following values when we iterate over an empty set. - // The commented out code is left here to document that the omission - // is intentional. - // - //_value = 0; - //_words = NULL; - //_blocks = NULL; - //_set = NULL; - } else { - _current = 0; - _value = 0; - _next_block = 0; - _next_word = IndexSet::words_per_block; - - _max_blocks = set->_max_blocks; - _words = NULL; - _blocks = set->_blocks; - _set = NULL; - } -} - //---------------------------- List16Iterator::advance_and_next() ----------------------------- // Advance to the next non-empty word in the set being iterated over. Return the next element // if there is one. If we are done, return 0. This method is called from the next() method @@ -467,10 +412,8 @@ // Found a non-empty word. _value = ((_next_block - 1) * IndexSet::bits_per_block) + (wi * IndexSet::bits_per_word); _current = _words[wi]; - - _next_word = wi+1; - - return next(); + _next_word = wi + 1; + return next_value(); } } @@ -488,8 +431,7 @@ _next_block = bi+1; _next_word = wi+1; - - return next(); + return next_value(); } } @@ -501,11 +443,6 @@ } } - // These assignments make redundant calls to next on a finished iterator - // faster. Probably not necessary. - _next_block = _max_blocks; - _next_word = IndexSet::words_per_block; - // No more words. return 0; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/indexSet.hpp --- a/src/hotspot/share/opto/indexSet.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/indexSet.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -190,6 +190,9 @@ // The number of elements in the set uint _count; + // The current upper limit of blocks that has been allocated and might be in use + uint _current_block_limit; + // Our top level array of bitvector segments BitBlock **_blocks; @@ -246,12 +249,13 @@ void clear() { _count = 0; - for (uint i = 0; i < _max_blocks; i++) { + for (uint i = 0; i < _current_block_limit; i++) { BitBlock *block = _blocks[i]; if (block != &_empty_block) { free_block(i); } } + _current_block_limit = 0; } uint count() const { return _count; } @@ -380,18 +384,18 @@ // The index of the next word we will inspect uint _next_word; - // A pointer to the contents of the current block - uint32_t *_words; - // The index of the next block we will inspect uint _next_block; + // The number of blocks in the set + uint _max_blocks; + + // A pointer to the contents of the current block + uint32_t *_words; + // A pointer to the blocks in our set IndexSet::BitBlock **_blocks; - // The number of blocks in the set - uint _max_blocks; - // If the iterator was created from a non-const set, we replace // non-canonical empty blocks with the _empty_block pointer. If // _set is NULL, we do no replacement. @@ -405,22 +409,64 @@ // If an iterator is built from a constant set then empty blocks // are not canonicalized. - IndexSetIterator(IndexSet *set); - IndexSetIterator(const IndexSet *set); + IndexSetIterator(IndexSet *set) : + _current(0), + _value(0), + _next_word(IndexSet::words_per_block), + _next_block(0), + _max_blocks(set->is_empty() ? 
0 : set->_current_block_limit), + _words(NULL), + _blocks(set->_blocks), + _set(set) { + #ifdef ASSERT + if (CollectIndexSetStatistics) { + set->tally_iteration_statistics(); + } + set->check_watch("traversed", set->count()); + #endif + } + + IndexSetIterator(const IndexSet *set) : + _current(0), + _value(0), + _next_word(IndexSet::words_per_block), + _next_block(0), + _max_blocks(set->is_empty() ? 0 : set->_current_block_limit), + _words(NULL), + _blocks(set->_blocks), + _set(NULL) + { + #ifdef ASSERT + if (CollectIndexSetStatistics) { + set->tally_iteration_statistics(); + } + // We don't call check_watch from here to avoid bad recursion. + // set->check_watch("traversed const", set->count()); + #endif + } + + // Return the next element of the set. + uint next_value() { + uint current = _current; + assert(current != 0, "sanity"); + uint advance = count_trailing_zeros(current); + assert(((current >> advance) & 0x1) == 1, "sanity"); + _current = (current >> advance) - 1; + _value += advance; + return _value; + } // Return the next element of the set. Return 0 when done. uint next() { - uint current = _current; - if (current != 0) { - uint advance = count_trailing_zeros(current); - assert(((current >> advance) & 0x1) == 1, "sanity"); - _current = (current >> advance) - 1; - _value += advance; - return _value; + if (_current != 0) { + return next_value(); + } else if (_next_word < IndexSet::words_per_block || _next_block < _max_blocks) { + return advance_and_next(); } else { - return advance_and_next(); + return 0; } } + }; #endif // SHARE_OPTO_INDEXSET_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/live.cpp --- a/src/hotspot/share/opto/live.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/live.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -84,7 +84,7 @@ // Array of delta-set pointers, indexed by block pre_order-1. _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks()); - memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks()); + memset(_deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks()); _free_IndexSet = NULL; @@ -108,8 +108,8 @@ uint r = _names.at(n->_idx); assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block"); - def->insert( r ); - use->remove( r ); + def->insert(r); + use->remove(r); uint cnt = n->req(); for (uint k = 1; k < cnt; k++) { Node *nk = n->in(k); @@ -152,7 +152,7 @@ while (_worklist->size()) { Block* block = _worklist->pop(); IndexSet *delta = getset(block); - assert( delta->count(), "missing delta set" ); + assert(delta->count(), "missing delta set"); // Add new-live-in to predecessors live-out sets for (uint l = 1; l < block->num_preds(); l++) { @@ -191,36 +191,34 @@ // Get an IndexSet for a block. Return existing one, if any. Make a new // empty one if a prior one does not exist. -IndexSet *PhaseLive::getset( Block *p ) { +IndexSet *PhaseLive::getset(Block *p) { IndexSet *delta = _deltas[p->_pre_order-1]; - if( !delta ) // Not on worklist? + if (!delta) { // Not on worklist? // Get a free set; flag as being on worklist delta = _deltas[p->_pre_order-1] = getfreeset(); + } return delta; // Return set of new live-out items } // Pull from free list, or allocate. Internal allocation on the returned set // is always from thread local storage. 
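
The next_value() helper in the indexSet.hpp hunk above reports set bits by repeatedly taking count_trailing_zeros of the current word, shifting the word down past the bit it just returned, and accumulating the shift amounts into _value. A self-contained sketch of that loop over a single 32-bit word, with a compiler builtin standing in for HotSpot's count_trailing_zeros:

    #include <cstdint>
    #include <cstdio>

    #if defined(__GNUC__) || defined(__clang__)
    static inline uint32_t ctz32(uint32_t x) { return (uint32_t)__builtin_ctz(x); }
    #else
    static inline uint32_t ctz32(uint32_t x) {   // portable fallback, x must be nonzero
      uint32_t n = 0;
      while ((x & 1u) == 0) { x >>= 1; n++; }
      return n;
    }
    #endif

    // Report every set bit of 'word', lowest first, the way the patched iterator does:
    // shift past the bit just reported, clear it, and keep a running absolute index.
    static void print_set_bits(uint32_t word) {
      uint32_t current = word;
      uint32_t value = 0;
      while (current != 0) {
        uint32_t advance = ctz32(current);
        current = (current >> advance) - 1;   // LSB is now 1, so -1 just clears it
        value += advance;                     // absolute index of the bit just found
        std::printf("%u ", value);
      }
      std::printf("\n");
    }

    int main() {
      print_set_bits(0xB4u);   // bits 2, 4, 5 and 7 are set, so this prints "2 4 5 7"
      return 0;
    }

Because the word is always shifted relative to the previously reported bit, only the relative advance ever needs to be added to _value, which is why the iterator can keep a single running counter instead of recomputing absolute positions.
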
-IndexSet *PhaseLive::getfreeset( ) { +IndexSet *PhaseLive::getfreeset() { IndexSet *f = _free_IndexSet; - if( !f ) { + if (!f) { f = new IndexSet; -// f->set_arena(Thread::current()->resource_area()); f->initialize(_maxlrg, Thread::current()->resource_area()); } else { // Pull from free list _free_IndexSet = f->next(); - //f->_cnt = 0; // Reset to empty -// f->set_arena(Thread::current()->resource_area()); f->initialize(_maxlrg, Thread::current()->resource_area()); } return f; } // Free an IndexSet from a block. -void PhaseLive::freeset( Block *p ) { +void PhaseLive::freeset(Block *p) { IndexSet *f = _deltas[p->_pre_order-1]; - if ( _keep_deltas ) { + if (_keep_deltas) { add_livein(p, f); } f->set_next(_free_IndexSet); @@ -230,40 +228,45 @@ // Add a live-out value to a given blocks live-out set. If it is new, then // also add it to the delta set and stick the block on the worklist. -void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) { +void PhaseLive::add_liveout(Block *p, uint r, VectorSet &first_pass) { IndexSet *live = &_live[p->_pre_order-1]; - if( live->insert(r) ) { // If actually inserted... + if (live->insert(r)) { // If actually inserted... // We extended the live-out set. See if the value is generated locally. // If it is not, then we must extend the live-in set. - if( !_defs[p->_pre_order-1].member( r ) ) { - if( !_deltas[p->_pre_order-1] && // Not on worklist? - first_pass.test(p->_pre_order) ) + if (!_defs[p->_pre_order-1].member(r)) { + if (!_deltas[p->_pre_order-1] && // Not on worklist? + first_pass.test(p->_pre_order)) { _worklist->push(p); // Actually go on worklist if already 1st pass + } getset(p)->insert(r); } } } // Add a vector of live-out values to a given blocks live-out set. -void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) { +void PhaseLive::add_liveout(Block *p, IndexSet *lo, VectorSet &first_pass) { IndexSet *live = &_live[p->_pre_order-1]; IndexSet *defs = &_defs[p->_pre_order-1]; IndexSet *on_worklist = _deltas[p->_pre_order-1]; IndexSet *delta = on_worklist ? on_worklist : getfreeset(); - IndexSetIterator elements(lo); - uint r; - while ((r = elements.next()) != 0) { - if( live->insert(r) && // If actually inserted... - !defs->member( r ) ) // and not defined locally - delta->insert(r); // Then add to live-in set + if (!lo->is_empty()) { + IndexSetIterator elements(lo); + uint r; + while ((r = elements.next()) != 0) { + if (live->insert(r) && // If actually inserted... + !defs->member(r)) { // and not defined locally + delta->insert(r); // Then add to live-in set + } + } } - if( delta->count() ) { // If actually added things + if (delta->count()) { // If actually added things _deltas[p->_pre_order-1] = delta; // Flag as on worklist now - if( !on_worklist && // Not on worklist? - first_pass.test(p->_pre_order) ) + if (!on_worklist && // Not on worklist? + first_pass.test(p->_pre_order)) { _worklist->push(p); // Actually go on worklist if already 1st pass + } } else { // Nothing there; just free it delta->set_next(_free_IndexSet); _free_IndexSet = delta; // Drop onto free list @@ -273,23 +276,25 @@ // Add a vector of live-in values to a given blocks live-in set. 
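
The PhaseLive routines in this region maintain liveness with per-block delta sets and a worklist: add_liveout() grows a block's live-out set, anything not defined locally becomes new live-in for that block, and the block goes back on the worklist so the change propagates to its predecessors. As a worked reference for the fixed point this converges to, here is a plain round-robin version of backward liveness (the textbook equations, not HotSpot's delta-set worklist):

    #include <cstddef>
    #include <set>
    #include <vector>

    // live_in(b)  = use(b) U (live_out(b) minus def(b))
    // live_out(b) = union of live_in(s) over successors s of b
    struct Block {
      std::set<int> use, def;
      std::vector<int> succs;                 // indices of successor blocks
    };

    static void compute_liveness(const std::vector<Block>& blocks,
                                 std::vector<std::set<int> >& live_in,
                                 std::vector<std::set<int> >& live_out) {
      size_t n = blocks.size();
      live_in.assign(n, std::set<int>());
      live_out.assign(n, std::set<int>());
      bool changed = true;
      while (changed) {                       // iterate to a fixed point
        changed = false;
        for (size_t b = n; b-- > 0; ) {       // reverse order converges quickly
          std::set<int> out;
          for (size_t i = 0; i < blocks[b].succs.size(); i++) {
            const std::set<int>& in_s = live_in[blocks[b].succs[i]];
            out.insert(in_s.begin(), in_s.end());
          }
          std::set<int> in = blocks[b].use;
          for (std::set<int>::const_iterator it = out.begin(); it != out.end(); ++it) {
            if (blocks[b].def.count(*it) == 0) in.insert(*it);
          }
          if (out != live_out[b] || in != live_in[b]) {
            live_out[b] = out;
            live_in[b] = in;
            changed = true;
          }
        }
      }
    }
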
void PhaseLive::add_livein(Block *p, IndexSet *lo) { IndexSet *livein = &_livein[p->_pre_order-1]; - IndexSetIterator elements(lo); - uint r; - while ((r = elements.next()) != 0) { - livein->insert(r); // Then add to live-in set + if (!livein->is_empty()) { + IndexSetIterator elements(lo); + uint r; + while ((r = elements.next()) != 0) { + livein->insert(r); // Then add to live-in set + } } } #ifndef PRODUCT // Dump the live-out set for a block -void PhaseLive::dump( const Block *b ) const { +void PhaseLive::dump(const Block *b) const { tty->print("Block %d: ",b->_pre_order); - if ( _keep_deltas ) { + if (_keep_deltas) { tty->print("LiveIn: "); _livein[b->_pre_order-1].dump(); } tty->print("LiveOut: "); _live[b->_pre_order-1].dump(); uint cnt = b->number_of_nodes(); - for( uint i=0; iprint("L%d/", _names.at(b->get_node(i)->_idx)); b->get_node(i)->dump(); } @@ -297,7 +302,7 @@ } // Verify that base pointers and derived pointers are still sane. -void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const { +void PhaseChaitin::verify_base_ptrs(ResourceArea *a) const { #ifdef ASSERT Unique_Node_List worklist(a); for (uint i = 0; i < _cfg.number_of_blocks(); i++) { @@ -322,17 +327,17 @@ worklist.clear(); worklist.push(check); uint k = 0; - while( k < worklist.size() ) { + while (k < worklist.size()) { check = worklist.at(k); assert(check,"Bad base or derived pointer"); // See PhaseChaitin::find_base_for_derived() for all cases. int isc = check->is_Copy(); - if( isc ) { + if (isc) { worklist.push(check->in(isc)); - } else if( check->is_Phi() ) { + } else if (check->is_Phi()) { for (uint m = 1; m < check->req(); m++) worklist.push(check->in(m)); - } else if( check->is_Con() ) { + } else if (check->is_Con()) { if (is_derived) { // Derived is NULL+offset assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer"); @@ -346,8 +351,8 @@ check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer"); } } - } else if( check->bottom_type()->is_ptr()->_offset == 0 ) { - if(check->is_Proj() || (check->is_Mach() && + } else if (check->bottom_type()->is_ptr()->_offset == 0) { + if (check->is_Proj() || (check->is_Mach() && (check->as_Mach()->ideal_Opcode() == Op_CreateEx || check->as_Mach()->ideal_Opcode() == Op_ThreadLocal || check->as_Mach()->ideal_Opcode() == Op_CMoveP || @@ -381,7 +386,7 @@ } // Verify that graphs and base pointers are still sane. -void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const { +void PhaseChaitin::verify(ResourceArea *a, bool verify_ifg) const { #ifdef ASSERT if (VerifyRegisterAllocator) { _cfg.verify(); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/opto/reg_split.cpp --- a/src/hotspot/share/opto/reg_split.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/opto/reg_split.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1271,10 +1271,12 @@ // it contains no members which compress to defidx. Finding such an // instance may be a case to add liveout adjustment in compress_uf_map(). // See 5063219. 
- uint member; - IndexSetIterator isi(liveout); - while ((member = isi.next()) != 0) { - assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed"); + if (!liveout->is_empty()) { + uint member; + IndexSetIterator isi(liveout); + while ((member = isi.next()) != 0) { + assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed"); + } } #endif Reachblock[slidx] = NULL; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/prims/jvmtiEnv.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -3333,35 +3333,8 @@ // rmonitor - pre-checked for validity jvmtiError JvmtiEnv::RawMonitorWait(JvmtiRawMonitor * rmonitor, jlong millis) { - int r = 0; Thread* thread = Thread::current(); - - if (thread->is_Java_thread()) { - JavaThread* current_thread = (JavaThread*)thread; - - /* Transition to thread_blocked without entering vm state */ - /* This is really evil. Normally you can't undo _thread_blocked */ - /* transitions like this because it would cause us to miss a */ - /* safepoint but since the thread was already in _thread_in_native */ - /* the thread is not leaving a safepoint safe state and it will */ - /* block when it tries to return from native. We can't safepoint */ - /* block in here because we could deadlock the vmthread. Blech. */ - - JavaThreadState state = current_thread->thread_state(); - assert(state == _thread_in_native, "Must be _thread_in_native"); - // frame should already be walkable since we are in native - assert(!current_thread->has_last_Java_frame() || - current_thread->frame_anchor()->walkable(), "Must be walkable"); - current_thread->set_thread_state(_thread_blocked); - - r = rmonitor->raw_wait(millis, true, current_thread); - // restore state, still at a safepoint safe state - current_thread->set_thread_state(state); - - } else { - r = rmonitor->raw_wait(millis, false, thread); - assert(r != JvmtiRawMonitor::M_INTERRUPTED, "non-JavaThread can't be interrupted"); - } + int r = rmonitor->raw_wait(millis, thread); switch (r) { case JvmtiRawMonitor::M_INTERRUPTED: diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/prims/jvmtiRawMonitor.cpp --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -174,29 +174,16 @@ return; } -int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) { - guarantee(_owner == self , "invariant"); - guarantee(_recursions == 0, "invariant"); - - QNode node(self); +inline void JvmtiRawMonitor::enqueue_waiter(QNode& node) { node._notified = 0; node._t_state = QNode::TS_WAIT; - RawMonitor_lock->lock_without_safepoint_check(); node._next = _wait_set; _wait_set = &node; RawMonitor_lock->unlock(); - - simple_exit(self); - guarantee(_owner != self, "invariant"); +} - int ret = OS_OK; - if (millis <= 0) { - self->_ParkEvent->park(); - } else { - ret = self->_ParkEvent->park(millis); - } - +inline void JvmtiRawMonitor::dequeue_waiter(QNode& node) { // If thread still resides on the waitset then unlink it. // Double-checked locking -- the usage is safe in this context // as _t_state is volatile and the lock-unlock operators are @@ -225,10 +212,60 @@ } guarantee(node._t_state == QNode::TS_RUN, "invariant"); +} + +// simple_wait is not quite so simple as we have to deal with the interaction +// with the Thread interrupt state, which resides in the java.lang.Thread object. 
+// That state must only be accessed while _thread_in_vm and requires proper thread-state +// transitions. However, we cannot perform such transitions whilst we hold the RawMonitor, +// else we can deadlock with the VMThread (which may also use RawMonitors as part of +// executing various callbacks). +// Returns M_OK usually, but M_INTERRUPTED if the thread is a JavaThread and was +// interrupted. +int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) { + guarantee(_owner == self , "invariant"); + guarantee(_recursions == 0, "invariant"); + + QNode node(self); + enqueue_waiter(node); + + simple_exit(self); + guarantee(_owner != self, "invariant"); + + int ret = M_OK; + if (self->is_Java_thread()) { + JavaThread* jt = (JavaThread*) self; + // Transition to VM so we can check interrupt state + ThreadInVMfromNative tivm(jt); + if (jt->is_interrupted(true)) { + ret = M_INTERRUPTED; + } else { + ThreadBlockInVM tbivm(jt); + jt->set_suspend_equivalent(); + if (millis <= 0) { + self->_ParkEvent->park(); + } else { + self->_ParkEvent->park(millis); + } + // Return to VM before post-check of interrupt state + } + if (jt->is_interrupted(true)) { + ret = M_INTERRUPTED; + } + } else { + if (millis <= 0) { + self->_ParkEvent->park(); + } else { + self->_ParkEvent->park(millis); + } + } + + dequeue_waiter(node); + simple_enter(self); - guarantee(_owner == self, "invariant"); guarantee(_recursions == 0, "invariant"); + return ret; } @@ -351,60 +388,59 @@ return M_OK; } -// All JavaThreads will enter here with state _thread_blocked - -int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread* self) { +int JvmtiRawMonitor::raw_wait(jlong millis, Thread* self) { if (self != _owner) { return M_ILLEGAL_MONITOR_STATE; } + int ret = M_OK; + // To avoid spurious wakeups we reset the parkevent. This is strictly optional. // The caller must be able to tolerate spurious returns from raw_wait(). self->_ParkEvent->reset(); OrderAccess::fence(); - JavaThread* jt = NULL; - // check interrupt event - if (interruptible) { - assert(self->is_Java_thread(), "Only JavaThreads can be interruptible"); - jt = (JavaThread*)self; - if (jt->is_interrupted(true)) { - return M_INTERRUPTED; - } - } else { - assert(!self->is_Java_thread(), "JavaThreads must be interuptible"); - } - intptr_t save = _recursions; _recursions = 0; _waiters++; - if (self->is_Java_thread()) { - guarantee(jt->thread_state() == _thread_blocked, "invariant"); - jt->set_suspend_equivalent(); - } - int rv = simple_wait(self, millis); + ret = simple_wait(self, millis); _recursions = save; _waiters--; guarantee(self == _owner, "invariant"); + if (self->is_Java_thread()) { + JavaThread* jt = (JavaThread*)self; for (;;) { + jt->set_suspend_equivalent(); if (!jt->handle_special_suspend_equivalent_condition()) { break; + } else { + // We've been suspended whilst waiting and so we have to + // relinquish the raw monitor until we are resumed. Of course + // after reacquiring we have to re-check for suspension again. + // Suspension requires we are _thread_blocked, and we also have to + // recheck for being interrupted. 
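
The comment and code above spell out the constraint the rewritten simple_wait works around: the interrupt bit lives in java.lang.Thread, may only be read under a proper thread-state transition, and such transitions cannot be made while the raw monitor is held. The resulting shape is check-interrupt, park (with or without a timeout), then re-check. As a loose analogy only, using ordinary C++ primitives instead of HotSpot's ParkEvent and thread-state machinery, that structure looks like:

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    enum WaitResult { WAIT_OK, WAIT_INTERRUPTED };   // illustrative names

    struct SketchMonitor {
      std::mutex lock;
      std::condition_variable cond;
      std::atomic<bool> interrupted;

      SketchMonitor() : interrupted(false) {}

      // Check the interrupt flag before parking, park, then re-check afterwards,
      // the same before/after structure as the patched simple_wait.
      WaitResult timed_wait(long millis) {
        std::unique_lock<std::mutex> guard(lock);
        if (interrupted.load()) {
          return WAIT_INTERRUPTED;
        }
        if (millis <= 0) {
          cond.wait(guard);                          // may return spuriously, like park()
        } else {
          cond.wait_for(guard, std::chrono::milliseconds(millis));
        }
        return interrupted.load() ? WAIT_INTERRUPTED : WAIT_OK;
      }

      void interrupt() {
        interrupted.store(true);
        cond.notify_all();
      }
    };

Callers of raw_wait() must already tolerate spurious returns, as the hunk above notes, which is why neither the real code nor this sketch needs to loop around the park.
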
+ simple_exit(jt); + { + ThreadInVMfromNative tivm(jt); + { + ThreadBlockInVM tbivm(jt); + jt->java_suspend_self(); + } + if (jt->is_interrupted(true)) { + ret = M_INTERRUPTED; + } + } + simple_enter(jt); } - simple_exit(jt); - jt->java_suspend_self(); - simple_enter(jt); - jt->set_suspend_equivalent(); } guarantee(jt == _owner, "invariant"); + } else { + assert(ret != M_INTERRUPTED, "Only JavaThreads can be interrupted"); } - if (interruptible && jt->is_interrupted(true)) { - return M_INTERRUPTED; - } - - return M_OK; + return ret; } int JvmtiRawMonitor::raw_notify(Thread* self) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/prims/jvmtiRawMonitor.hpp --- a/src/hotspot/share/prims/jvmtiRawMonitor.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/prims/jvmtiRawMonitor.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -65,6 +65,11 @@ // JVMTI_RM_MAGIC is set in contructor and unset in destructor. enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') }; + // Helpers for queue management isolation + void enqueue_waiter(QNode& node); + void dequeue_waiter(QNode& node); + + // Mostly low-level implementation routines void simple_enter(Thread* self); void simple_exit(Thread* self); int simple_wait(Thread* self, jlong millis); @@ -92,7 +97,7 @@ int recursions() const { return _recursions; } void raw_enter(Thread* self); int raw_exit(Thread* self); - int raw_wait(jlong millis, bool interruptible, Thread* self); + int raw_wait(jlong millis, Thread* self); int raw_notify(Thread* self); int raw_notifyAll(Thread* self); int magic() const { return _magic; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/prims/whitebox.cpp diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/abstract_vm_version.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/abstract_vm_version.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,316 @@ +/* + * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "compiler/compilerDefinitions.hpp" +#include "runtime/arguments.hpp" +#include "runtime/vm_version.hpp" +#include "utilities/globalDefinitions.hpp" + +const char* Abstract_VM_Version::_s_vm_release = Abstract_VM_Version::vm_release(); +const char* Abstract_VM_Version::_s_internal_vm_info_string = Abstract_VM_Version::internal_vm_info_string(); + +uint64_t Abstract_VM_Version::_features = 0; +const char* Abstract_VM_Version::_features_string = ""; + +bool Abstract_VM_Version::_supports_cx8 = false; +bool Abstract_VM_Version::_supports_atomic_getset4 = false; +bool Abstract_VM_Version::_supports_atomic_getset8 = false; +bool Abstract_VM_Version::_supports_atomic_getadd4 = false; +bool Abstract_VM_Version::_supports_atomic_getadd8 = false; +unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U; +unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0; +unsigned int Abstract_VM_Version::_data_cache_line_flush_size = 0; + +VirtualizationType Abstract_VM_Version::_detected_virtualization = NoDetectedVirtualization; + +#ifndef HOTSPOT_VERSION_STRING + #error HOTSPOT_VERSION_STRING must be defined +#endif + +#ifndef VERSION_FEATURE + #error VERSION_FEATURE must be defined +#endif +#ifndef VERSION_INTERIM + #error VERSION_INTERIM must be defined +#endif +#ifndef VERSION_UPDATE + #error VERSION_UPDATE must be defined +#endif +#ifndef VERSION_PATCH + #error VERSION_PATCH must be defined +#endif +#ifndef VERSION_BUILD + #error VERSION_BUILD must be defined +#endif + +#ifndef VERSION_STRING + #error VERSION_STRING must be defined +#endif + +#ifndef DEBUG_LEVEL + #error DEBUG_LEVEL must be defined +#endif + +#define VM_RELEASE HOTSPOT_VERSION_STRING + +// HOTSPOT_VERSION_STRING equals the JDK VERSION_STRING (unless overridden +// in a standalone build). +int Abstract_VM_Version::_vm_major_version = VERSION_FEATURE; +int Abstract_VM_Version::_vm_minor_version = VERSION_INTERIM; +int Abstract_VM_Version::_vm_security_version = VERSION_UPDATE; +int Abstract_VM_Version::_vm_patch_version = VERSION_PATCH; +int Abstract_VM_Version::_vm_build_number = VERSION_BUILD; + +#if defined(_LP64) + #define VMLP "64-Bit " +#else + #define VMLP "" +#endif + +#ifndef VMTYPE + #ifdef TIERED + #define VMTYPE "Server" + #else // TIERED + #ifdef ZERO + #define VMTYPE "Zero" + #else // ZERO + #define VMTYPE COMPILER1_PRESENT("Client") \ + COMPILER2_PRESENT("Server") + #endif // ZERO + #endif // TIERED +#endif + +#ifndef HOTSPOT_VM_DISTRO + #error HOTSPOT_VM_DISTRO must be defined +#endif +#define VMNAME HOTSPOT_VM_DISTRO " " VMLP VMTYPE " VM" + +const char* Abstract_VM_Version::vm_name() { + return VMNAME; +} + + +const char* Abstract_VM_Version::vm_vendor() { +#ifdef VENDOR + return VENDOR; +#else + return "Oracle Corporation"; +#endif +} + + +const char* Abstract_VM_Version::vm_info_string() { + switch (Arguments::mode()) { + case Arguments::_int: + return UseSharedSpaces ? 
"interpreted mode, sharing" : "interpreted mode"; + case Arguments::_mixed: + if (UseSharedSpaces) { + if (UseAOT) { + return "mixed mode, aot, sharing"; +#ifdef TIERED + } else if(is_client_compilation_mode_vm()) { + return "mixed mode, emulated-client, sharing"; +#endif + } else { + return "mixed mode, sharing"; + } + } else { + if (UseAOT) { + return "mixed mode, aot"; +#ifdef TIERED + } else if(is_client_compilation_mode_vm()) { + return "mixed mode, emulated-client"; +#endif + } else { + return "mixed mode"; + } + } + case Arguments::_comp: +#ifdef TIERED + if (is_client_compilation_mode_vm()) { + return UseSharedSpaces ? "compiled mode, emulated-client, sharing" : "compiled mode, emulated-client"; + } +#endif + return UseSharedSpaces ? "compiled mode, sharing" : "compiled mode"; + }; + ShouldNotReachHere(); + return ""; +} + +// NOTE: do *not* use stringStream. this function is called by +// fatal error handler. if the crash is in native thread, +// stringStream cannot get resource allocated and will SEGV. +const char* Abstract_VM_Version::vm_release() { + return VM_RELEASE; +} + +// NOTE: do *not* use stringStream. this function is called by +// fatal error handlers. if the crash is in native thread, +// stringStream cannot get resource allocated and will SEGV. +const char* Abstract_VM_Version::jre_release_version() { + return VERSION_STRING; +} + +#define OS LINUX_ONLY("linux") \ + WINDOWS_ONLY("windows") \ + SOLARIS_ONLY("solaris") \ + AIX_ONLY("aix") \ + BSD_ONLY("bsd") + +#ifndef CPU +#ifdef ZERO +#define CPU ZERO_LIBARCH +#elif defined(PPC64) +#if defined(VM_LITTLE_ENDIAN) +#define CPU "ppc64le" +#else +#define CPU "ppc64" +#endif // PPC64 +#else +#define CPU AARCH64_ONLY("aarch64") \ + AMD64_ONLY("amd64") \ + IA32_ONLY("x86") \ + IA64_ONLY("ia64") \ + S390_ONLY("s390") \ + SPARC_ONLY("sparc") +#endif // !ZERO +#endif // !CPU + +const char *Abstract_VM_Version::vm_platform_string() { + return OS "-" CPU; +} + +const char* Abstract_VM_Version::internal_vm_info_string() { + #ifndef HOTSPOT_BUILD_USER + #define HOTSPOT_BUILD_USER unknown + #endif + + #ifndef HOTSPOT_BUILD_COMPILER + #ifdef _MSC_VER + #if _MSC_VER == 1600 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 10.0 (VS2010)" + #elif _MSC_VER == 1700 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 11.0 (VS2012)" + #elif _MSC_VER == 1800 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 12.0 (VS2013)" + #elif _MSC_VER == 1900 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 14.0 (VS2015)" + #elif _MSC_VER == 1911 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.3 (VS2017)" + #elif _MSC_VER == 1912 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.5 (VS2017)" + #elif _MSC_VER == 1913 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.6 (VS2017)" + #elif _MSC_VER == 1914 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.7 (VS2017)" + #elif _MSC_VER == 1915 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.8 (VS2017)" + #else + #define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER) + #endif + #elif defined(__SUNPRO_CC) + #if __SUNPRO_CC == 0x580 + #define HOTSPOT_BUILD_COMPILER "Workshop 5.8" + #elif __SUNPRO_CC == 0x590 + #define HOTSPOT_BUILD_COMPILER "Workshop 5.9" + #elif __SUNPRO_CC == 0x5100 + #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1" + #elif __SUNPRO_CC == 0x5120 + #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u3" + #elif __SUNPRO_CC == 0x5130 + #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u4" + #else + #define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC) + #endif + #elif defined(__clang_version__) + #define HOTSPOT_BUILD_COMPILER "clang " 
__VERSION__ + #elif defined(__GNUC__) + #define HOTSPOT_BUILD_COMPILER "gcc " __VERSION__ + #else + #define HOTSPOT_BUILD_COMPILER "unknown compiler" + #endif + #endif + + #ifndef FLOAT_ARCH + #if defined(__SOFTFP__) + #define FLOAT_ARCH_STR "-sflt" + #else + #define FLOAT_ARCH_STR "" + #endif + #else + #define FLOAT_ARCH_STR XSTR(FLOAT_ARCH) + #endif + + #define INTERNAL_VERSION_SUFFIX VM_RELEASE ")" \ + " for " OS "-" CPU FLOAT_ARCH_STR \ + " JRE (" VERSION_STRING "), built on " __DATE__ " " __TIME__ \ + " by " XSTR(HOTSPOT_BUILD_USER) " with " HOTSPOT_BUILD_COMPILER + + return strcmp(DEBUG_LEVEL, "release") == 0 + ? VMNAME " (" INTERNAL_VERSION_SUFFIX + : VMNAME " (" DEBUG_LEVEL " " INTERNAL_VERSION_SUFFIX; +} + +const char *Abstract_VM_Version::vm_build_user() { + return HOTSPOT_BUILD_USER; +} + +const char *Abstract_VM_Version::jdk_debug_level() { + return DEBUG_LEVEL; +} + +const char *Abstract_VM_Version::printable_jdk_debug_level() { + // Debug level is not printed for "release" builds + return strcmp(DEBUG_LEVEL, "release") == 0 ? "" : DEBUG_LEVEL " "; +} + +unsigned int Abstract_VM_Version::jvm_version() { + return ((Abstract_VM_Version::vm_major_version() & 0xFF) << 24) | + ((Abstract_VM_Version::vm_minor_version() & 0xFF) << 16) | + ((Abstract_VM_Version::vm_security_version() & 0xFF) << 8) | + (Abstract_VM_Version::vm_build_number() & 0xFF); +} + +bool Abstract_VM_Version::print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]) { + char line[500]; + FILE* fp = fopen(filename, "r"); + if (fp == NULL) { + return false; + } + + st->print_cr("Virtualization information:"); + while (fgets(line, sizeof(line), fp) != NULL) { + int i = 0; + while (keywords_to_match[i] != NULL) { + if (strncmp(line, keywords_to_match[i], strlen(keywords_to_match[i])) == 0) { + st->print("%s", line); + break; + } + i++; + } + } + fclose(fp); + return true; +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/abstract_vm_version.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/abstract_vm_version.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,194 @@ +/* + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_RUNTIME_ABSTRACT_VM_VERSION_HPP +#define SHARE_RUNTIME_ABSTRACT_VM_VERSION_HPP + +#include "memory/allocation.hpp" // For declaration of class AllStatic +#include "utilities/globalDefinitions.hpp" + +typedef enum { + NoDetectedVirtualization, + XenHVM, + KVM, + VMWare, + HyperV, + PowerVM, // on AIX or Linux ppc64(le) + PowerFullPartitionMode, // on Linux ppc64(le) + PowerKVM +} VirtualizationType; + +class outputStream; + +// Abstract_VM_Version provides information about the VM. + +class Abstract_VM_Version: AllStatic { + friend class VMStructs; + friend class JVMCIVMStructs; + + protected: + static const char* _s_vm_release; + static const char* _s_internal_vm_info_string; + + // CPU feature flags. + static uint64_t _features; + static const char* _features_string; + + // These are set by machine-dependent initializations + static bool _supports_cx8; + static bool _supports_atomic_getset4; + static bool _supports_atomic_getset8; + static bool _supports_atomic_getadd4; + static bool _supports_atomic_getadd8; + static unsigned int _logical_processors_per_package; + static unsigned int _L1_data_cache_line_size; + static int _vm_major_version; + static int _vm_minor_version; + static int _vm_security_version; + static int _vm_patch_version; + static int _vm_build_number; + static unsigned int _data_cache_line_flush_size; + + static VirtualizationType _detected_virtualization; + + public: + // Called as part of the runtime services initialization which is + // called from the management module initialization (via init_globals()) + // after argument parsing and attaching of the main thread has + // occurred. Examines a variety of the hardware capabilities of + // the platform to determine which features can be used to execute the + // program. + static void initialize() { } + + // This allows for early initialization of VM_Version information + // that may be needed later in the initialization sequence but before + // full VM_Version initialization is possible. It can not depend on any + // other part of the VM being initialized when called. Platforms that + // need to specialize this define VM_Version::early_initialize(). + static void early_initialize() { } + + // Called to initialize VM variables needing initialization + // after command line parsing. Platforms that need to specialize + // this should define VM_Version::init_before_ergo(). 
+ static void init_before_ergo() {} + + // Name + static const char* vm_name(); + // Vendor + static const char* vm_vendor(); + // VM version information string printed by launcher (java -version) + static const char* vm_info_string(); + static const char* vm_release(); + static const char* vm_platform_string(); + static const char* vm_build_user(); + + static int vm_major_version() { return _vm_major_version; } + static int vm_minor_version() { return _vm_minor_version; } + static int vm_security_version() { return _vm_security_version; } + static int vm_patch_version() { return _vm_patch_version; } + static int vm_build_number() { return _vm_build_number; } + + // Gets the jvm_version_info.jvm_version defined in jvm.h + static unsigned int jvm_version(); + + // Internal version providing additional build information + static const char* internal_vm_info_string(); + static const char* jre_release_version(); + static const char* jdk_debug_level(); + static const char* printable_jdk_debug_level(); + + static uint64_t features() { + return _features; + } + + static const char* features_string() { + return _features_string; + } + + static VirtualizationType get_detected_virtualization() { + return _detected_virtualization; + } + + // platforms that need to specialize this + // define VM_Version::print_platform_virtualization_info() + static void print_platform_virtualization_info(outputStream*) { } + + // does HW support an 8-byte compare-exchange operation? + static bool supports_cx8() { +#ifdef SUPPORTS_NATIVE_CX8 + return true; +#else + return _supports_cx8; +#endif + } + // does HW support atomic get-and-set or atomic get-and-add? Used + // to guide intrinsification decisions for Unsafe atomic ops + static bool supports_atomic_getset4() {return _supports_atomic_getset4;} + static bool supports_atomic_getset8() {return _supports_atomic_getset8;} + static bool supports_atomic_getadd4() {return _supports_atomic_getadd4;} + static bool supports_atomic_getadd8() {return _supports_atomic_getadd8;} + + static unsigned int logical_processors_per_package() { + return _logical_processors_per_package; + } + + static unsigned int L1_data_cache_line_size() { + return _L1_data_cache_line_size; + } + + // the size in bytes of a data cache line flushed by a flush + // operation which should be a power of two or zero if cache line + // writeback is not supported by the current os_cpu combination + static unsigned int data_cache_line_flush_size() { + return _data_cache_line_flush_size; + } + + // returns true if and only if cache line writeback is supported + static bool supports_data_cache_line_flush() { + return _data_cache_line_flush_size != 0; + } + + // ARCH specific policy for the BiasedLocking + static bool use_biased_locking() { return true; } + + // Number of page sizes efficiently supported by the hardware. Most chips now + // support two sizes, thus this default implementation. Processor-specific + // subclasses should define new versions to hide this one as needed. Note + // that the O/S may support more sizes, but at most this many are used. + static uint page_size_count() { return 2; } + + // Denominator for computing default ParallelGCThreads for machines with + // a large number of cores. + static uint parallel_worker_threads_denominator() { return 8; } + + // Does this CPU support spin wait instruction? + static bool supports_on_spin_wait() { return false; } + + // Does platform support fast class initialization checks for static methods? 
+ static bool supports_fast_class_init_checks() { return false; } + + static bool print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]); +}; + +#endif // SHARE_RUNTIME_ABSTRACT_VM_VERSION_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/arguments.cpp --- a/src/hotspot/share/runtime/arguments.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/arguments.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -85,6 +85,7 @@ bool Arguments::_ClipInlining = ClipInlining; intx Arguments::_Tier3InvokeNotifyFreqLog = Tier3InvokeNotifyFreqLog; intx Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold; +size_t Arguments::_SharedBaseAddress = SharedBaseAddress; bool Arguments::_enable_preview = false; @@ -618,6 +619,7 @@ { "ResizeOldPLAB", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) }, { "UseCMSBestFit", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) }, { "UseCMSInitiatingOccupancyOnly", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) }, + { "GCLockerInvokesConcurrent", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) }, { "BindGCTaskThreadsToCPUs", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) }, { "UseGCTaskAffinity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) }, @@ -2274,6 +2276,9 @@ Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold; } + // CDS dumping always write the archive to the default value of SharedBaseAddress. + Arguments::_SharedBaseAddress = SharedBaseAddress; + // Setup flags for mixed which is the default set_mode_flags(_mixed); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/arguments.hpp --- a/src/hotspot/share/runtime/arguments.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/arguments.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -481,6 +481,7 @@ static char* SharedArchivePath; static char* SharedDynamicArchivePath; + static size_t _SharedBaseAddress; // The default value specified in globals.hpp static int num_archives(const char* archive_path) NOT_CDS_RETURN_(0); static void extract_shared_archive_paths(const char* archive_path, char** base_archive_path, @@ -563,7 +564,7 @@ static const char* GetSharedArchivePath() { return SharedArchivePath; } static const char* GetSharedDynamicArchivePath() { return SharedDynamicArchivePath; } - + static size_t default_SharedBaseAddress() { return _SharedBaseAddress; } // Java launcher properties static void process_sun_java_launcher_properties(JavaVMInitArgs* args); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/globals.hpp --- a/src/hotspot/share/runtime/globals.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/globals.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -2430,6 +2430,14 @@ product(ccstr, ExtraSharedClassListFile, NULL, \ "Extra classlist for building the CDS archive file") \ \ + diagnostic(intx, ArchiveRelocationMode, 0, \ + "(0) first map at preferred address, and if " \ + "unsuccessful, map at alternative address (default); " \ + "(1) always map at alternative address; " \ + "(2) always map at preferred address, and if unsuccessful, " \ + "do not map the archive") \ + range(0, 2) \ + \ experimental(size_t, ArrayAllocatorMallocLimit, \ SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \ "Allocation less than this value will be allocated " \ diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/mutexLocker.cpp --- 
a/src/hotspot/share/runtime/mutexLocker.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/mutexLocker.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -72,6 +72,7 @@ Monitor* CGC_lock = NULL; Monitor* STS_lock = NULL; Monitor* FullGCCount_lock = NULL; +Monitor* G1OldGCCount_lock = NULL; Monitor* DirtyCardQ_CBL_mon = NULL; Mutex* Shared_DirtyCardQ_lock = NULL; Mutex* MarkStackFreeList_lock = NULL; @@ -203,6 +204,8 @@ def(FullGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_never); // in support of ExplicitGCInvokesConcurrent if (UseG1GC) { + def(G1OldGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_always); + def(DirtyCardQ_CBL_mon , PaddedMonitor, access, true, _safepoint_check_never); def(Shared_DirtyCardQ_lock , PaddedMutex , access + 1, true, _safepoint_check_never); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/mutexLocker.hpp --- a/src/hotspot/share/runtime/mutexLocker.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/mutexLocker.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -68,6 +68,7 @@ // fore- & background GC threads. extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet. extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc +extern Monitor* G1OldGCCount_lock; // in support of "concurrent" full gc extern Monitor* DirtyCardQ_CBL_mon; // Protects dirty card Q // completed buffer queue. extern Mutex* Shared_DirtyCardQ_lock; // Lock protecting dirty card diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/objectMonitor.cpp --- a/src/hotspot/share/runtime/objectMonitor.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/objectMonitor.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1267,6 +1267,10 @@ int ret = OS_OK; int WasNotified = 0; + + // Need to check interrupt state whilst still _thread_in_vm + bool interrupted = interruptible && jt->is_interrupted(false); + { // State transition wrappers OSThread* osthread = Self->osthread(); OSThreadWaitState osts(osthread, true); @@ -1275,7 +1279,7 @@ // Thread is in thread_blocked state and oop access is unsafe. 
jt->set_suspend_equivalent(); - if (interruptible && (jt->is_interrupted(false) || HAS_PENDING_EXCEPTION)) { + if (interrupted || HAS_PENDING_EXCEPTION) { // Intentionally empty } else if (node._notified == 0) { if (millis <= 0) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/vmOperations.hpp --- a/src/hotspot/share/runtime/vmOperations.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/vmOperations.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -66,6 +66,7 @@ template(G1CollectForAllocation) \ template(G1CollectFull) \ template(G1Concurrent) \ + template(G1TryInitiateConcMark) \ template(ZMarkStart) \ template(ZMarkEnd) \ template(ZRelocateStart) \ diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/vmStructs.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -1103,7 +1103,7 @@ CDS_ONLY(nonstatic_field(FileMapInfo, _header, FileMapHeader*)) \ CDS_ONLY( static_field(FileMapInfo, _current_info, FileMapInfo*)) \ CDS_ONLY(nonstatic_field(FileMapHeader, _space[0], CDSFileMapRegion)) \ - CDS_ONLY(nonstatic_field(CDSFileMapRegion, _addr._base, char*)) \ + CDS_ONLY(nonstatic_field(CDSFileMapRegion, _mapped_base, char*)) \ CDS_ONLY(nonstatic_field(CDSFileMapRegion, _used, size_t)) \ \ /******************/ \ diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/vm_version.cpp --- a/src/hotspot/share/runtime/vm_version.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/vm_version.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,275 +25,8 @@ #include "precompiled.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" -#include "oops/oop.inline.hpp" -#include "runtime/arguments.hpp" #include "runtime/vm_version.hpp" -const char* Abstract_VM_Version::_s_vm_release = Abstract_VM_Version::vm_release(); -const char* Abstract_VM_Version::_s_internal_vm_info_string = Abstract_VM_Version::internal_vm_info_string(); - -uint64_t Abstract_VM_Version::_features = 0; -const char* Abstract_VM_Version::_features_string = ""; - -bool Abstract_VM_Version::_supports_cx8 = false; -bool Abstract_VM_Version::_supports_atomic_getset4 = false; -bool Abstract_VM_Version::_supports_atomic_getset8 = false; -bool Abstract_VM_Version::_supports_atomic_getadd4 = false; -bool Abstract_VM_Version::_supports_atomic_getadd8 = false; -unsigned int Abstract_VM_Version::_logical_processors_per_package = 1U; -unsigned int Abstract_VM_Version::_L1_data_cache_line_size = 0; -unsigned int Abstract_VM_Version::_data_cache_line_flush_size = 0; - -VirtualizationType Abstract_VM_Version::_detected_virtualization = NoDetectedVirtualization; - -#ifndef HOTSPOT_VERSION_STRING - #error HOTSPOT_VERSION_STRING must be defined -#endif - -#ifndef VERSION_FEATURE - #error VERSION_FEATURE must be defined -#endif -#ifndef VERSION_INTERIM - #error VERSION_INTERIM must be defined -#endif -#ifndef VERSION_UPDATE - #error VERSION_UPDATE must be defined -#endif -#ifndef VERSION_PATCH - #error VERSION_PATCH must be defined -#endif -#ifndef VERSION_BUILD - #error VERSION_BUILD must be defined -#endif - -#ifndef VERSION_STRING - #error VERSION_STRING must be defined -#endif - -#ifndef DEBUG_LEVEL - #error DEBUG_LEVEL must be defined -#endif - -#define VM_RELEASE HOTSPOT_VERSION_STRING - -// HOTSPOT_VERSION_STRING equals the JDK VERSION_STRING (unless overridden -// in a standalone build). 
-int Abstract_VM_Version::_vm_major_version = VERSION_FEATURE; -int Abstract_VM_Version::_vm_minor_version = VERSION_INTERIM; -int Abstract_VM_Version::_vm_security_version = VERSION_UPDATE; -int Abstract_VM_Version::_vm_patch_version = VERSION_PATCH; -int Abstract_VM_Version::_vm_build_number = VERSION_BUILD; - -#if defined(_LP64) - #define VMLP "64-Bit " -#else - #define VMLP "" -#endif - -#ifndef VMTYPE - #ifdef TIERED - #define VMTYPE "Server" - #else // TIERED - #ifdef ZERO - #define VMTYPE "Zero" - #else // ZERO - #define VMTYPE COMPILER1_PRESENT("Client") \ - COMPILER2_PRESENT("Server") - #endif // ZERO - #endif // TIERED -#endif - -#ifndef HOTSPOT_VM_DISTRO - #error HOTSPOT_VM_DISTRO must be defined -#endif -#define VMNAME HOTSPOT_VM_DISTRO " " VMLP VMTYPE " VM" - -const char* Abstract_VM_Version::vm_name() { - return VMNAME; -} - - -const char* Abstract_VM_Version::vm_vendor() { -#ifdef VENDOR - return VENDOR; -#else - return "Oracle Corporation"; -#endif -} - - -const char* Abstract_VM_Version::vm_info_string() { - switch (Arguments::mode()) { - case Arguments::_int: - return UseSharedSpaces ? "interpreted mode, sharing" : "interpreted mode"; - case Arguments::_mixed: - if (UseSharedSpaces) { - if (UseAOT) { - return "mixed mode, aot, sharing"; -#ifdef TIERED - } else if(is_client_compilation_mode_vm()) { - return "mixed mode, emulated-client, sharing"; -#endif - } else { - return "mixed mode, sharing"; - } - } else { - if (UseAOT) { - return "mixed mode, aot"; -#ifdef TIERED - } else if(is_client_compilation_mode_vm()) { - return "mixed mode, emulated-client"; -#endif - } else { - return "mixed mode"; - } - } - case Arguments::_comp: -#ifdef TIERED - if (is_client_compilation_mode_vm()) { - return UseSharedSpaces ? "compiled mode, emulated-client, sharing" : "compiled mode, emulated-client"; - } -#endif - return UseSharedSpaces ? "compiled mode, sharing" : "compiled mode"; - }; - ShouldNotReachHere(); - return ""; -} - -// NOTE: do *not* use stringStream. this function is called by -// fatal error handler. if the crash is in native thread, -// stringStream cannot get resource allocated and will SEGV. -const char* Abstract_VM_Version::vm_release() { - return VM_RELEASE; -} - -// NOTE: do *not* use stringStream. this function is called by -// fatal error handlers. if the crash is in native thread, -// stringStream cannot get resource allocated and will SEGV. 
-const char* Abstract_VM_Version::jre_release_version() { - return VERSION_STRING; -} - -#define OS LINUX_ONLY("linux") \ - WINDOWS_ONLY("windows") \ - SOLARIS_ONLY("solaris") \ - AIX_ONLY("aix") \ - BSD_ONLY("bsd") - -#ifndef CPU -#ifdef ZERO -#define CPU ZERO_LIBARCH -#elif defined(PPC64) -#if defined(VM_LITTLE_ENDIAN) -#define CPU "ppc64le" -#else -#define CPU "ppc64" -#endif // PPC64 -#else -#define CPU AARCH64_ONLY("aarch64") \ - AMD64_ONLY("amd64") \ - IA32_ONLY("x86") \ - IA64_ONLY("ia64") \ - S390_ONLY("s390") \ - SPARC_ONLY("sparc") -#endif // !ZERO -#endif // !CPU - -const char *Abstract_VM_Version::vm_platform_string() { - return OS "-" CPU; -} - -const char* Abstract_VM_Version::internal_vm_info_string() { - #ifndef HOTSPOT_BUILD_USER - #define HOTSPOT_BUILD_USER unknown - #endif - - #ifndef HOTSPOT_BUILD_COMPILER - #ifdef _MSC_VER - #if _MSC_VER == 1600 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 10.0 (VS2010)" - #elif _MSC_VER == 1700 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 11.0 (VS2012)" - #elif _MSC_VER == 1800 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 12.0 (VS2013)" - #elif _MSC_VER == 1900 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 14.0 (VS2015)" - #elif _MSC_VER == 1911 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.3 (VS2017)" - #elif _MSC_VER == 1912 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.5 (VS2017)" - #elif _MSC_VER == 1913 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.6 (VS2017)" - #elif _MSC_VER == 1914 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.7 (VS2017)" - #elif _MSC_VER == 1915 - #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.8 (VS2017)" - #else - #define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER) - #endif - #elif defined(__SUNPRO_CC) - #if __SUNPRO_CC == 0x580 - #define HOTSPOT_BUILD_COMPILER "Workshop 5.8" - #elif __SUNPRO_CC == 0x590 - #define HOTSPOT_BUILD_COMPILER "Workshop 5.9" - #elif __SUNPRO_CC == 0x5100 - #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1" - #elif __SUNPRO_CC == 0x5120 - #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u3" - #elif __SUNPRO_CC == 0x5130 - #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u4" - #else - #define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC) - #endif - #elif defined(__clang_version__) - #define HOTSPOT_BUILD_COMPILER "clang " __VERSION__ - #elif defined(__GNUC__) - #define HOTSPOT_BUILD_COMPILER "gcc " __VERSION__ - #else - #define HOTSPOT_BUILD_COMPILER "unknown compiler" - #endif - #endif - - #ifndef FLOAT_ARCH - #if defined(__SOFTFP__) - #define FLOAT_ARCH_STR "-sflt" - #else - #define FLOAT_ARCH_STR "" - #endif - #else - #define FLOAT_ARCH_STR XSTR(FLOAT_ARCH) - #endif - - #define INTERNAL_VERSION_SUFFIX VM_RELEASE ")" \ - " for " OS "-" CPU FLOAT_ARCH_STR \ - " JRE (" VERSION_STRING "), built on " __DATE__ " " __TIME__ \ - " by " XSTR(HOTSPOT_BUILD_USER) " with " HOTSPOT_BUILD_COMPILER - - return strcmp(DEBUG_LEVEL, "release") == 0 - ? VMNAME " (" INTERNAL_VERSION_SUFFIX - : VMNAME " (" DEBUG_LEVEL " " INTERNAL_VERSION_SUFFIX; -} - -const char *Abstract_VM_Version::vm_build_user() { - return HOTSPOT_BUILD_USER; -} - -const char *Abstract_VM_Version::jdk_debug_level() { - return DEBUG_LEVEL; -} - -const char *Abstract_VM_Version::printable_jdk_debug_level() { - // Debug level is not printed for "release" builds - return strcmp(DEBUG_LEVEL, "release") == 0 ? 
"" : DEBUG_LEVEL " "; -} - -unsigned int Abstract_VM_Version::jvm_version() { - return ((Abstract_VM_Version::vm_major_version() & 0xFF) << 24) | - ((Abstract_VM_Version::vm_minor_version() & 0xFF) << 16) | - ((Abstract_VM_Version::vm_security_version() & 0xFF) << 8) | - (Abstract_VM_Version::vm_build_number() & 0xFF); -} - void VM_Version_init() { VM_Version::initialize(); @@ -304,27 +37,3 @@ os::print_cpu_info(&ls, buf, sizeof(buf)); } } - -bool Abstract_VM_Version::print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]) { - char line[500]; - FILE* fp = fopen(filename, "r"); - if (fp == NULL) { - return false; - } - - st->print_cr("Virtualization information:"); - while (fgets(line, sizeof(line), fp) != NULL) { - int i = 0; - while (keywords_to_match[i] != NULL) { - if (strncmp(line, keywords_to_match[i], strlen(keywords_to_match[i])) == 0) { - st->print("%s", line); - break; - } - i++; - } - } - fclose(fp); - return true; -} - - diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/runtime/vm_version.hpp --- a/src/hotspot/share/runtime/vm_version.hpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/runtime/vm_version.hpp Mon Nov 18 12:40:06 2019 -0500 @@ -25,171 +25,7 @@ #ifndef SHARE_RUNTIME_VM_VERSION_HPP #define SHARE_RUNTIME_VM_VERSION_HPP -#include "memory/allocation.hpp" -#include "utilities/ostream.hpp" -#include "utilities/macros.hpp" - -typedef enum { - NoDetectedVirtualization, - XenHVM, - KVM, - VMWare, - HyperV, - PowerVM, // on AIX or Linux ppc64(le) - PowerFullPartitionMode, // on Linux ppc64(le) - PowerKVM -} VirtualizationType; - -// VM_Version provides information about the VM. - -class Abstract_VM_Version: AllStatic { - friend class VMStructs; - friend class JVMCIVMStructs; - - protected: - static const char* _s_vm_release; - static const char* _s_internal_vm_info_string; - - // CPU feature flags. - static uint64_t _features; - static const char* _features_string; - - // These are set by machine-dependent initializations - static bool _supports_cx8; - static bool _supports_atomic_getset4; - static bool _supports_atomic_getset8; - static bool _supports_atomic_getadd4; - static bool _supports_atomic_getadd8; - static unsigned int _logical_processors_per_package; - static unsigned int _L1_data_cache_line_size; - static int _vm_major_version; - static int _vm_minor_version; - static int _vm_security_version; - static int _vm_patch_version; - static int _vm_build_number; - static unsigned int _data_cache_line_flush_size; - - static VirtualizationType _detected_virtualization; - - public: - // Called as part of the runtime services initialization which is - // called from the management module initialization (via init_globals()) - // after argument parsing and attaching of the main thread has - // occurred. Examines a variety of the hardware capabilities of - // the platform to determine which features can be used to execute the - // program. - static void initialize() { } - - // This allows for early initialization of VM_Version information - // that may be needed later in the initialization sequence but before - // full VM_Version initialization is possible. It can not depend on any - // other part of the VM being initialized when called. Platforms that - // need to specialize this define VM_Version::early_initialize(). - static void early_initialize() { } - - // Called to initialize VM variables needing initialization - // after command line parsing. 
Platforms that need to specialize - // this should define VM_Version::init_before_ergo(). - static void init_before_ergo() {} - - // Name - static const char* vm_name(); - // Vendor - static const char* vm_vendor(); - // VM version information string printed by launcher (java -version) - static const char* vm_info_string(); - static const char* vm_release(); - static const char* vm_platform_string(); - static const char* vm_build_user(); - - static int vm_major_version() { return _vm_major_version; } - static int vm_minor_version() { return _vm_minor_version; } - static int vm_security_version() { return _vm_security_version; } - static int vm_patch_version() { return _vm_patch_version; } - static int vm_build_number() { return _vm_build_number; } - - // Gets the jvm_version_info.jvm_version defined in jvm.h - static unsigned int jvm_version(); - - // Internal version providing additional build information - static const char* internal_vm_info_string(); - static const char* jre_release_version(); - static const char* jdk_debug_level(); - static const char* printable_jdk_debug_level(); - - static uint64_t features() { - return _features; - } - - static const char* features_string() { - return _features_string; - } - - static VirtualizationType get_detected_virtualization() { - return _detected_virtualization; - } - - // platforms that need to specialize this - // define VM_Version::print_platform_virtualization_info() - static void print_platform_virtualization_info(outputStream*) { } - - // does HW support an 8-byte compare-exchange operation? - static bool supports_cx8() { -#ifdef SUPPORTS_NATIVE_CX8 - return true; -#else - return _supports_cx8; -#endif - } - // does HW support atomic get-and-set or atomic get-and-add? Used - // to guide intrinsification decisions for Unsafe atomic ops - static bool supports_atomic_getset4() {return _supports_atomic_getset4;} - static bool supports_atomic_getset8() {return _supports_atomic_getset8;} - static bool supports_atomic_getadd4() {return _supports_atomic_getadd4;} - static bool supports_atomic_getadd8() {return _supports_atomic_getadd8;} - - static unsigned int logical_processors_per_package() { - return _logical_processors_per_package; - } - - static unsigned int L1_data_cache_line_size() { - return _L1_data_cache_line_size; - } - - // the size in bytes of a data cache line flushed by a flush - // operation which should be a power of two or zero if cache line - // writeback is not supported by the current os_cpu combination - static unsigned int data_cache_line_flush_size() { - return _data_cache_line_flush_size; - } - - // returns true if and only if cache line writeback is supported - static bool supports_data_cache_line_flush() { - return _data_cache_line_flush_size != 0; - } - - // ARCH specific policy for the BiasedLocking - static bool use_biased_locking() { return true; } - - // Number of page sizes efficiently supported by the hardware. Most chips now - // support two sizes, thus this default implementation. Processor-specific - // subclasses should define new versions to hide this one as needed. Note - // that the O/S may support more sizes, but at most this many are used. - static uint page_size_count() { return 2; } - - // Denominator for computing default ParallelGCThreads for machines with - // a large number of cores. - static uint parallel_worker_threads_denominator() { return 8; } - - // Does this CPU support spin wait instruction? 
- static bool supports_on_spin_wait() { return false; } - - // Does platform support fast class initialization checks for static methods? - static bool supports_fast_class_init_checks() { return false; } - - static bool print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]); -}; - +#include "utilities/macros.hpp" // for CPU_HEADER() macro. #include CPU_HEADER(vm_version) #endif // SHARE_RUNTIME_VM_VERSION_HPP diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/services/mallocTracker.hpp diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/services/memTracker.hpp diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/services/virtualMemoryTracker.cpp --- a/src/hotspot/share/services/virtualMemoryTracker.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/services/virtualMemoryTracker.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -45,8 +45,8 @@ if (ThreadStackTracker::track_as_vm()) { // Snapshot current thread stacks VirtualMemoryTracker::snapshot_thread_stacks(); - as_snapshot()->copy_to(s); } + as_snapshot()->copy_to(s); } SortedLinkedList* VirtualMemoryTracker::_reserved_regions; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/hotspot/share/utilities/hashtable.cpp --- a/src/hotspot/share/utilities/hashtable.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/hotspot/share/utilities/hashtable.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -61,7 +61,7 @@ if (entry == NULL) { if (_first_free_entry + _entry_size >= _end_block) { - int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries)); + int block_size = MIN2(512, MAX3(2, (int)_table_size / 2, (int)_number_of_entries)); int len = _entry_size * block_size; len = 1 << log2_int(len); // round down to power of 2 assert(len >= _entry_size, ""); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java --- a/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java Mon Nov 18 12:40:06 2019 -0500 @@ -1168,7 +1168,7 @@ * } catch (Throwable e) { * if (!a2.isInstance(e)) throw e; * return a3.invokeBasic(ex, a6, a7); - * }} + * }} */ private Name emitGuardWithCatch(int pos) { Name args = lambdaForm.names[pos]; @@ -1263,26 +1263,27 @@ * load target (-- target) * load args (-- args... target) * INVOKEVIRTUAL MethodHandle.invokeBasic (depends) - * FINALLY_NORMAL: (-- r) - * load cleanup (-- cleanup r) - * SWAP (-- r cleanup) - * ACONST_NULL (-- t r cleanup) - * SWAP (-- r t cleanup) - * load args (-- args... r t cleanup) - * INVOKEVIRTUAL MethodHandle.invokeBasic (-- r) + * FINALLY_NORMAL: (-- r_2nd* r) + * store returned value (--) + * load cleanup (-- cleanup) + * ACONST_NULL (-- t cleanup) + * load returned value (-- r_2nd* r t cleanup) + * load args (-- args... r_2nd* r t cleanup) + * INVOKEVIRTUAL MethodHandle.invokeBasic (-- r_2nd* r) * GOTO DONE * CATCH: (-- t) * DUP (-- t t) * FINALLY_EXCEPTIONAL: (-- t t) * load cleanup (-- cleanup t t) * SWAP (-- t cleanup t) - * load default for r (-- r t cleanup t) - * load args (-- args... r t cleanup t) - * INVOKEVIRTUAL MethodHandle.invokeBasic (-- r t) - * POP (-- t) + * load default for r (-- r_2nd* r t cleanup t) + * load args (-- args... r_2nd* r t cleanup t) + * INVOKEVIRTUAL MethodHandle.invokeBasic (-- r_2nd* r t) + * POP/POP2* (-- t) * ATHROW * DONE: (-- r) * } + * * = depends on whether the return type takes up 2 stack slots. 
*/ private Name emitTryFinally(int pos) { Name args = lambdaForm.names[pos]; @@ -1295,7 +1296,9 @@ Label lDone = new Label(); Class returnType = result.function.resolvedHandle().type().returnType(); + BasicType basicReturnType = BasicType.basicType(returnType); boolean isNonVoid = returnType != void.class; + MethodType type = args.function.resolvedHandle().type() .dropParameterTypes(0,1) .changeReturnType(returnType); @@ -1316,13 +1319,14 @@ mv.visitLabel(lTo); // FINALLY_NORMAL: - emitPushArgument(invoker, 1); // load cleanup + int index = extendLocalsMap(new Class[]{ returnType }); if (isNonVoid) { - mv.visitInsn(Opcodes.SWAP); + emitStoreInsn(basicReturnType, index); } + emitPushArgument(invoker, 1); // load cleanup mv.visitInsn(Opcodes.ACONST_NULL); if (isNonVoid) { - mv.visitInsn(Opcodes.SWAP); + emitLoadInsn(basicReturnType, index); } emitPushArguments(args, 1); // load args (skip 0: method handle) mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, MH, "invokeBasic", cleanupDesc, false); @@ -1341,7 +1345,7 @@ emitPushArguments(args, 1); // load args (skip 0: method handle) mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, MH, "invokeBasic", cleanupDesc, false); if (isNonVoid) { - mv.visitInsn(Opcodes.POP); + emitPopInsn(basicReturnType); } mv.visitInsn(Opcodes.ATHROW); @@ -1351,6 +1355,24 @@ return result; } + private void emitPopInsn(BasicType type) { + mv.visitInsn(popInsnOpcode(type)); + } + + private static int popInsnOpcode(BasicType type) { + switch (type) { + case I_TYPE: + case F_TYPE: + case L_TYPE: + return Opcodes.POP; + case J_TYPE: + case D_TYPE: + return Opcodes.POP2; + default: + throw new InternalError("unknown type: " + type); + } + } + /** * Emit bytecode for the loop idiom. *

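For context on the emitTryFinally change above: SWAP and a bare POP only operate on single-slot (category 1) stack values, so the previous bytecode could not correctly carry a long or double result of the target into the cleanup; the rewrite spills the result to a local slot and pops with POP or POP2 chosen by BasicType. A minimal sketch of the user-visible behavior this supports, using only the public MethodHandles.tryFinally API (the class and method names here are invented for the illustration, not taken from the patch):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class TryFinallyTwoSlotDemo {
    // target returns a long, i.e. a two-slot value on the JVM operand stack
    static long doubleIt(long x) { return x * 2; }

    // cleanup receives (Throwable, result, leading target args) and returns the result type
    static long cleanup(Throwable t, long result, long x) {
        System.out.println("cleanup saw result = " + result);
        return result;
    }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodHandle target = lookup.findStatic(TryFinallyTwoSlotDemo.class, "doubleIt",
                MethodType.methodType(long.class, long.class));
        MethodHandle cleanup = lookup.findStatic(TryFinallyTwoSlotDemo.class, "cleanup",
                MethodType.methodType(long.class, Throwable.class, long.class, long.class));
        MethodHandle guarded = MethodHandles.tryFinally(target, cleanup);
        System.out.println(guarded.invoke(21L));   // cleanup sees 42, then 42 is printed
    }
}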
diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.base/share/classes/java/net/DatagramSocket.java --- a/src/java.base/share/classes/java/net/DatagramSocket.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.base/share/classes/java/net/DatagramSocket.java Mon Nov 18 12:40:06 2019 -0500 @@ -646,7 +646,9 @@ * if this socket has an associated channel, * and the channel is in non-blocking mode. * @throws IllegalArgumentException if the socket is connected, - * and connected address and packet address differ. + * and connected address and packet address differ, or + * if the socket is not connected and the packet address + * is not set. * * @see java.net.DatagramPacket * @see SecurityManager#checkMulticast(InetAddress) @@ -655,12 +657,15 @@ * @spec JSR-51 */ public void send(DatagramPacket p) throws IOException { - InetAddress packetAddress = null; synchronized (p) { if (isClosed()) throw new SocketException("Socket is closed"); - checkAddress (p.getAddress(), "send"); + InetAddress packetAddress = p.getAddress(); + checkAddress (packetAddress, "send"); if (connectState == ST_NOT_CONNECTED) { + if (packetAddress == null) { + throw new IllegalArgumentException("Address not set"); + } // check the address is ok with the security manager on every send. SecurityManager security = System.getSecurityManager(); @@ -669,16 +674,15 @@ // while you are trying to send the packet for example // after the security check but before the send. if (security != null) { - if (p.getAddress().isMulticastAddress()) { - security.checkMulticast(p.getAddress()); + if (packetAddress.isMulticastAddress()) { + security.checkMulticast(packetAddress); } else { - security.checkConnect(p.getAddress().getHostAddress(), + security.checkConnect(packetAddress.getHostAddress(), p.getPort()); } } } else { // we're connected - packetAddress = p.getAddress(); if (packetAddress == null) { p.setAddress(connectedAddress); p.setPort(connectedPort); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.base/share/classes/java/net/MulticastSocket.java --- a/src/java.base/share/classes/java/net/MulticastSocket.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.base/share/classes/java/net/MulticastSocket.java Mon Nov 18 12:40:06 2019 -0500 @@ -29,6 +29,7 @@ import java.util.Collections; import java.util.Enumeration; import java.util.Set; +import java.net.PortUnreachableException; /** * The multicast datagram socket class is useful for sending @@ -643,11 +644,19 @@ * @param ttl optional time to live for multicast packet. * default ttl is 1. * - * @throws IOException is raised if an error occurs i.e - * error while setting ttl. + * @throws IOException is raised if an error occurs i.e + * error while setting ttl. * @throws SecurityException if a security manager exists and its * {@code checkMulticast} or {@code checkConnect} * method doesn't allow the send. + * @throws PortUnreachableException may be thrown if the socket is connected + * to a currently unreachable destination. Note, there is no + * guarantee that the exception will be thrown. + * @throws IllegalArgumentException if the socket is connected, + * and connected address and packet address differ, or + * if the socket is not connected and the packet address + * is not set. + * * * @deprecated Use the following code or its equivalent instead: * ...... 
@@ -667,32 +676,34 @@ throws IOException { if (isClosed()) throw new SocketException("Socket is closed"); - checkAddress(p.getAddress(), "send"); synchronized(ttlLock) { synchronized(p) { + InetAddress packetAddress = p.getAddress(); + checkAddress(packetAddress, "send"); if (connectState == ST_NOT_CONNECTED) { + if (packetAddress == null) { + throw new IllegalArgumentException("Address not set"); + } // Security manager makes sure that the multicast address // is allowed one and that the ttl used is less // than the allowed maxttl. SecurityManager security = System.getSecurityManager(); if (security != null) { - if (p.getAddress().isMulticastAddress()) { - security.checkMulticast(p.getAddress(), ttl); + if (packetAddress.isMulticastAddress()) { + security.checkMulticast(packetAddress, ttl); } else { - security.checkConnect(p.getAddress().getHostAddress(), + security.checkConnect(packetAddress.getHostAddress(), p.getPort()); } } } else { // we're connected - InetAddress packetAddress = null; - packetAddress = p.getAddress(); if (packetAddress == null) { p.setAddress(connectedAddress); p.setPort(connectedPort); } else if ((!packetAddress.equals(connectedAddress)) || p.getPort() != connectedPort) { - throw new SecurityException("connected address and packet address" + + throw new IllegalArgumentException("connected address and packet address" + " differ"); } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.base/share/classes/java/nio/channels/DatagramChannel.java --- a/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Mon Nov 18 12:40:06 2019 -0500 @@ -255,8 +255,11 @@ *
<p>
The channel's socket is configured so that it only receives * datagrams from, and sends datagrams to, the given remote peer * address. Once connected, datagrams may not be received from or sent to - * any other address. A datagram socket remains connected until it is - * explicitly disconnected or until it is closed. + * any other address. Datagrams in the channel's {@linkplain + * java.net.StandardSocketOptions#SO_RCVBUF socket receive buffer}, which + * have not been {@linkplain #receive(ByteBuffer) received} before invoking + * this method, may be discarded. The channel's socket remains connected + * until it is explicitly disconnected or until it is closed. * *
<p>
This method performs exactly the same security checks as the {@link * java.net.DatagramSocket#connect connect} method of the {@link @@ -270,12 +273,13 @@ * should be taken to ensure that a connected datagram channel is not shared * with untrusted code. * - *
<p>
This method may be invoked at any time. It will not have any effect - * on read or write operations that are already in progress at the moment - * that it is invoked. If this channel's socket is not bound then this method - * will first cause the socket to be bound to an address that is assigned + *
<p>
This method may be invoked at any time. If another thread has + * already initiated a read or write operation upon this channel, then an + * invocation of this method will block until any such operation is + * complete. If this channel's socket is not bound then this method will + * first cause the socket to be bound to an address that is assigned * automatically, as if invoking the {@link #bind bind} method with a - * parameter of {@code null}.
</p>
+ * parameter of {@code null}.
</p>
* * @param remote * The remote address to which this channel is to be connected @@ -323,9 +327,10 @@ * from, and sends datagrams to, any remote address so long as the security * manager, if installed, permits it. * - *
<p>
This method may be invoked at any time. It will not have any effect - * on read or write operations that are already in progress at the moment - * that it is invoked. + *
<p>
This method may be invoked at any time. If another thread has + * already initiated a read or write operation upon this channel, then an + * invocation of this method will block until any such operation is + * complete. * *
<p>
If this channel's socket is not connected, or if the channel is * closed, then invoking this method has no effect.
</p>
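For context on the DatagramSocket and MulticastSocket send changes earlier in this patch: both now reject, with IllegalArgumentException, a packet that has no address set while the socket is unconnected, and MulticastSocket now throws IllegalArgumentException instead of SecurityException when the packet's address disagrees with the connected address. A minimal sketch of the first case against the public API (class name invented for the illustration):

import java.net.DatagramPacket;
import java.net.DatagramSocket;

public class SendWithoutAddressDemo {
    public static void main(String[] args) throws Exception {
        try (DatagramSocket socket = new DatagramSocket()) {      // bound, but not connected
            byte[] payload = "ping".getBytes();
            DatagramPacket packet = new DatagramPacket(payload, payload.length); // no address set
            try {
                socket.send(packet);
            } catch (IllegalArgumentException expected) {
                // with this change the send is rejected up front: "Address not set"
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }
}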
diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.base/share/classes/sun/security/tools/keytool/Main.java --- a/src/java.base/share/classes/sun/security/tools/keytool/Main.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.base/share/classes/sun/security/tools/keytool/Main.java Mon Nov 18 12:40:06 2019 -0500 @@ -1151,17 +1151,15 @@ } } else if (command == GENKEYPAIR) { if (keyAlgName == null) { - keyAlgName = "DSA"; - weakWarnings.add(String.format(rb.getString( - "keyalg.option.1.missing.warning"), keyAlgName)); + throw new Exception(rb.getString( + "keyalg.option.missing.error")); } doGenKeyPair(alias, dname, keyAlgName, keysize, groupName, sigAlgName); kssave = true; } else if (command == GENSECKEY) { if (keyAlgName == null) { - keyAlgName = "DES"; - weakWarnings.add(String.format(rb.getString( - "keyalg.option.1.missing.warning"), keyAlgName)); + throw new Exception(rb.getString( + "keyalg.option.missing.error")); } doGenSecretKey(alias, keyAlgName, keysize); kssave = true; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.base/share/classes/sun/security/tools/keytool/Resources.java --- a/src/java.base/share/classes/sun/security/tools/keytool/Resources.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.base/share/classes/sun/security/tools/keytool/Resources.java Mon Nov 18 12:40:06 2019 -0500 @@ -474,7 +474,7 @@ {"migrate.keystore.warning", "Migrated \"%1$s\" to %4$s. The %2$s keystore is backed up as \"%3$s\"."}, {"backup.keystore.warning", "The original keystore \"%1$s\" is backed up as \"%3$s\"..."}, {"importing.keystore.status", "Importing keystore %1$s to %2$s..."}, - {"keyalg.option.1.missing.warning", "No -keyalg option. The default key algorithm (%s) is a legacy algorithm and is no longer recommended. In a subsequent release of the JDK, the default will be removed and the -keyalg option must be specified."}, + {"keyalg.option.missing.error", "The -keyalg option must be specified."}, {"showinfo.no.option", "Missing option for -showinfo. Try \"keytool -showinfo -tls\"."}, }; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.compiler/share/classes/javax/lang/model/SourceVersion.java --- a/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java Mon Nov 18 12:40:06 2019 -0500 @@ -58,9 +58,9 @@ * 9: modules, small cleanups to 1.7 and 1.8 changes * 10: local-variable type inference (var) * 11: local-variable syntax for lambda parameters - * 12: no changes (switch expressions were in preview) + * 12: no changes (switch expressions in preview) * 13: no changes (switch expressions and text blocks in preview) - * 14: TBD + * 14: switch expressions */ /** @@ -199,6 +199,8 @@ * The version recognized by the Java Platform, Standard Edition * 14. * + * Additions in this release include switch expressions. + * * @since 14 */ RELEASE_14; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.xml/share/classes/com/sun/org/apache/xalan/internal/xsltc/trax/DOM2TO.java --- a/src/java.xml/share/classes/com/sun/org/apache/xalan/internal/xsltc/trax/DOM2TO.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.xml/share/classes/com/sun/org/apache/xalan/internal/xsltc/trax/DOM2TO.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,6 +1,5 @@ /* - * reserved comment block - * DO NOT REMOVE OR ALTER! + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 
*/ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -42,6 +41,7 @@ /** * @author Santiago Pericas-Geertsen * @author Sunitha Reddy + * @LastModified: Nov 2019 */ public class DOM2TO implements XMLReader, Locator2 { @@ -171,7 +171,7 @@ } // Process all non-namespace attributes next - NamespaceMappings nm = new NamespaceMappings(); + NamespaceMappings nm = null; for (int i = 0; i < length; i++) { final Node attr = map.item(i); final String qnameAttr = attr.getNodeName(); @@ -187,6 +187,7 @@ // For attributes not given an prefix explictly // but having a namespace uri we need // to explicitly generate the prefix + if (nm == null) nm = new NamespaceMappings(); String newPrefix = nm.lookupPrefix(uriAttr); if (newPrefix == null) newPrefix = nm.generateNextPrefix(); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/java.xml/share/legal/xalan.md --- a/src/java.xml/share/legal/xalan.md Wed Nov 13 17:21:31 2019 -0500 +++ b/src/java.xml/share/legal/xalan.md Mon Nov 18 12:40:06 2019 -0500 @@ -231,4 +231,25 @@ See the License for the specific language governing permissions and limitations under the License. + +JLEX COPYRIGHT NOTICE, LICENSE AND DISCLAIMER. +Copyright 1996-2003 by Elliot Joel Berk and C. Scott Ananian +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both the copyright notice and this permission notice and warranty +disclaimer appear in supporting documentation, and that the name of +the authors or their employers not be used in advertising or publicity +pertaining to distribution of the software without specific, written +prior permission. +The authors and their employers disclaim all warranties with regard to +this software, including all implied warranties of merchantability and +fitness. In no event shall the authors or their employers be liable for +any special, indirect or consequential damages or any damages whatsoever +resulting from loss of use, data or profits, whether in an action of +contract, negligence or other tortious action, arising out of or in +connection with the use or performance of this software.The portions of +JLex output which are hard-coded into the JLex source code are (naturally) +covered by this same license. + diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java Mon Nov 18 12:40:06 2019 -0500 @@ -84,7 +84,7 @@ /** 1.11 local-variable syntax for lambda parameters */ JDK11("11"), - /** 12, no language features; switch expression were in preview */ + /** 12, no language features; switch expression in preview */ JDK12("12"), /** @@ -94,8 +94,7 @@ JDK13("13"), /** - * 14 covers the to be determined language features that will be - * added in JDK 14. + * 14, switch expressions */ JDK14("14"); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ // SpaceInfo type = db.lookupType("CDSFileMapRegion"); - long mdRegionBaseAddressOffset = type.getField("_addr._base").getOffset(); + long mdRegionBaseAddressOffset = type.getField("_mapped_base").getOffset(); mdRegionBaseAddress = (mdSpaceValue.addOffsetTo(mdRegionBaseAddressOffset)).getAddressAt(0); long mdRegionSizeOffset = type.getField("_used").getOffset(); long mdRegionSize = (mdSpaceValue.addOffsetTo(mdRegionSizeOffset)).getAddressAt(0).asLongValue(); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.c --- a/src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.c Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.c Mon Nov 18 12:40:06 2019 -0500 @@ -261,6 +261,7 @@ // mangled name of Arguments::SharedArchivePath #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE" #define USE_SHARED_SPACES_SYM "UseSharedSpaces" +#define SHARED_BASE_ADDRESS_SYM "SharedBaseAddress" #define LIBJVM_NAME "/libjvm.so" #endif @@ -268,6 +269,7 @@ // mangled name of Arguments::SharedArchivePath #define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE" #define USE_SHARED_SPACES_SYM "_UseSharedSpaces" +#define SHARED_BASE_ADDRESS_SYM "_SharedBaseAddress" #define LIBJVM_NAME "/libjvm.dylib" #endif @@ -281,7 +283,8 @@ char classes_jsa[PATH_MAX]; CDSFileMapHeaderBase header; int fd = -1; - uintptr_t base = 0, useSharedSpacesAddr = 0; + uintptr_t useSharedSpacesAddr = 0; + uintptr_t sharedBaseAddressAddr = 0, sharedBaseAddress = 0; uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0; jboolean useSharedSpaces = 0; int m; @@ -308,6 +311,17 @@ return true; } + sharedBaseAddressAddr = lookup_symbol(ph, jvm_name, SHARED_BASE_ADDRESS_SYM); + if (sharedBaseAddressAddr == 0) { + print_debug("can't lookup 'SharedBaseAddress' flag\n"); + return false; + } + + if (read_pointer(ph, sharedBaseAddressAddr, &sharedBaseAddress) != true) { + print_debug("can't read the value of 'SharedBaseAddress' flag\n"); + return false; + } + sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM); if (sharedArchivePathAddrAddr == 0) { print_debug("can't lookup shared archive path symbol\n"); @@ -363,16 +377,19 @@ ph->core->classes_jsa_fd = fd; // add read-only maps from classes.jsa to the list of maps for (m = 0; m < NUM_CDS_REGIONS; m++) { - if (header._space[m]._read_only) { + if (header._space[m]._read_only && + !header._space[m]._is_heap_region && + !header._space[m]._is_bitmap_region) { // With *some* linux versions, the core file doesn't include read-only mmap'ed // files regions, so let's add them here. This is harmless if the core file also // include these regions. - base = (uintptr_t) header._space[m]._addr._base; + uintptr_t base = sharedBaseAddress + (uintptr_t) header._space[m]._mapping_offset; + size_t size = header._space[m]._used; // no need to worry about the fractional pages at-the-end. // possible fractional pages are handled by core_read_data. 
add_class_share_map_info(ph, (off_t) header._space[m]._file_offset, - base, (size_t) header._space[m]._used); - print_debug("added a share archive map at 0x%lx\n", base); + base, size); + print_debug("added a share archive map [%d] at 0x%lx (size 0x%lx bytes)\n", m, base, size); } } return true; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.hotspot.agent/solaris/native/libsaproc/saproc.cpp --- a/src/jdk.hotspot.agent/solaris/native/libsaproc/saproc.cpp Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.hotspot.agent/solaris/native/libsaproc/saproc.cpp Mon Nov 18 12:40:06 2019 -0500 @@ -538,9 +538,11 @@ } #define USE_SHARED_SPACES_SYM "UseSharedSpaces" +#define SHARED_BASE_ADDRESS_SYM "SharedBaseAddress" // mangled symbol name for Arguments::SharedArchivePath #define SHARED_ARCHIVE_PATH_SYM "__1cJArgumentsRSharedArchivePath_" +static uintptr_t sharedBaseAddress = 0; static int init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name) { Debugger* dbg = (Debugger*) cd; @@ -577,6 +579,19 @@ return 1; } + psaddr_t sharedBaseAddressAddr = 0; + ps_pglobal_lookup(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM, &sharedBaseAddressAddr); + if (sharedBaseAddressAddr == 0) { + print_debug("can't find symbol 'SharedBaseAddress'\n"); + THROW_NEW_DEBUGGER_EXCEPTION_("can't find 'SharedBaseAddress' flag\n", 1); + } + + sharedBaseAddress = 0; + if (read_pointer(ph, sharedBaseAddressAddr, &sharedBaseAddress) != true) { + print_debug("can't read the value of 'SharedBaseAddress' flag\n"); + THROW_NEW_DEBUGGER_EXCEPTION_("can't get SharedBaseAddress from debuggee", 1); + } + char classes_jsa[PATH_MAX]; psaddr_t sharedArchivePathAddrAddr = 0; ps_pglobal_lookup(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM, &sharedArchivePathAddrAddr); @@ -648,9 +663,14 @@ if (_libsaproc_debug) { for (int m = 0; m < NUM_CDS_REGIONS; m++) { - print_debug("shared file offset %d mapped at 0x%lx, size = %ld, read only? = %d\n", - pheader->_space[m]._file_offset, pheader->_space[m]._addr._base, - pheader->_space[m]._used, pheader->_space[m]._read_only); + if (!pheader->_space[m]._is_heap_region && + !pheader->_space[m]._is_bitmap_region) { + jlong mapping_offset = pheader->_space[m]._mapping_offset; + jlong baseAddress = mapping_offset + (jlong)sharedBaseAddress; + print_debug("shared file offset %d mapped at 0x%lx, size = %ld, read only? = %d\n", + pheader->_space[m]._file_offset, baseAddress, + pheader->_space[m]._used, pheader->_space[m]._read_only); + } } } @@ -1052,11 +1072,14 @@ // We can skip the non-read-only maps. These are mapped as MAP_PRIVATE // and hence will be read by libproc. Besides, the file copy may be // stale because the process might have modified those pages. 
- if (pheader->_space[m]._read_only) { - jlong baseAddress = (jlong) (uintptr_t) pheader->_space[m]._addr._base; - size_t usedSize = pheader->_space[m]._used; - if (address >= baseAddress && address < (baseAddress + usedSize)) { - // the given address falls in this shared heap area + if (pheader->_space[m]._read_only && + !pheader->_space[m]._is_heap_region && + !pheader->_space[m]._is_bitmap_region) { + jlong mapping_offset = (jlong) (uintptr_t) pheader->_space[m]._mapping_offset; + jlong baseAddress = mapping_offset + (jlong)sharedBaseAddress; + size_t usedSize = pheader->_space[m]._used; + if (address >= baseAddress && address < (baseAddress + usedSize)) { + // the given address falls in this shared metadata area print_debug("found shared map at 0x%lx\n", (long) baseAddress); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64BaseAssembler.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64BaseAssembler.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64BaseAssembler.java Mon Nov 18 12:40:06 2019 -0500 @@ -944,7 +944,7 @@ } public final boolean vexPrefix(Register dst, Register nds, Register src, AVXSize size, int pp, int mmmmm, int w, int wEvex, boolean checkAVX) { - if (isAVX512Register(dst) || isAVX512Register(nds) || isAVX512Register(src)) { + if (isAVX512Register(dst) || isAVX512Register(nds) || isAVX512Register(src) || size == AVXSize.ZMM) { evexPrefix(dst, Register.None, nds, src, size, pp, mmmmm, wEvex, Z0, B0); return true; } @@ -953,7 +953,7 @@ } public final boolean vexPrefix(Register dst, Register nds, AMD64Address src, AVXSize size, int pp, int mmmmm, int w, int wEvex, boolean checkAVX) { - if (isAVX512Register(dst) || isAVX512Register(nds)) { + if (isAVX512Register(dst) || isAVX512Register(nds) || size == AVXSize.ZMM) { evexPrefix(dst, Register.None, nds, src, size, pp, mmmmm, wEvex, Z0, B0); return true; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64.test/src/org/graalvm/compiler/core/aarch64/test/AArch64ElideL2ITest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64.test/src/org/graalvm/compiler/core/aarch64/test/AArch64ElideL2ITest.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Arm Limited. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + + +package org.graalvm.compiler.core.aarch64.test; + +import org.graalvm.compiler.lir.LIRInstruction; +import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp.BinaryConstOp; +import org.junit.Test; + +import java.util.function.Predicate; + +public class AArch64ElideL2ITest extends AArch64MatchRuleTest { + private static final Predicate predicate = op -> { + if (op instanceof BinaryConstOp && op.name().toUpperCase().equals("AND")) { + return true; + } + return false; + }; + + public int addWithSingleL2I(long m) { + return (int) m + 100; + } + + @Test + public void testAddWithSingleL2I() { + test("addWithSingleL2I", 5L); + checkLIR("addWithSingleL2I", predicate, 0); + } + + public int addWithTwoL2I(long m, long n) { + return (int) m + (int) n; + } + + @Test + public void testAddWithTwoL2I() { + test("addWithTwoL2I", 5L, 0x1FFFFFFFFL); + checkLIR("addWithTwoL2I", predicate, 0); + } + + public int addWithTwoNarrow(long m, long n) { + return (int) m + (short) n; + } + + @Test + public void testAddWithTwoNarrow() { + test("addWithTwoNarrow", 0x80000000L, 6L); + checkLIR("addWithTwoNarrow", predicate, 1); + } + + public int subSingleL2I(int m, long n) { + return m - (int) n; + } + + @Test + public void testSubSingleL2I() { + test("subSingleL2I", 13, 40L); + checkLIR("subSingleL2I", predicate, 0); + } + + public int shiftWithSingleL2I(long m) { + return ((int) m) >> 5; + } + + @Test + public void testShiftWithSingleL2I() { + test("shiftWithSingleL2I", 234L); + checkLIR("shiftWithSingleL2I", predicate, 0); + } + + public int shiftWithTwoL2I(long m, long n) { + return (int) m << (int) n; + } + + @Test + public void testShiftWithTwoL2I() { + test("shiftWithTwoL2I", 234L, 3L); + checkLIR("shiftWithTwoL2I", predicate, 0); + } + + public long shiftLongWithL2I(long a, int m) { + return a + ((m & 0xFFFFFFFFL) << (int) a); + } + + @Test + public void testShiftLongWithL2I() { + test("shiftLongWithL2I", 0xFFFFFFFFL, 123); + checkLIR("shiftLongWithL2I", predicate, 1); + } + + public int logicWithTwoL2I(long m, long n) { + return (int) m | (int) n; + } + + @Test + public void testLogicWithTwoL2I() { + test("logicWithTwoL2I", 234L, 3L); + checkLIR("logicWithTwoL2I", predicate, 0); + } + + public int negateL2I(long m) { + return -((int) m); + } + + @Test + public void testNegateL2I() { + test("negateL2I", 0xFFFFFFFFL); + checkLIR("negateL2I", predicate, 0); + } + + public int notL2I(long m) { + return ~((int) m); + } + + @Test + public void testNotL2I() { + test("notL2I", 0xFFFFFFFFL); + checkLIR("notL2I", predicate, 0); + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64NodeMatchRules.java Mon Nov 18 12:40:06 2019 -0500 @@ -38,7 +38,6 @@ import org.graalvm.compiler.core.gen.NodeMatchRules; import org.graalvm.compiler.core.match.ComplexMatchResult; import org.graalvm.compiler.core.match.MatchRule; -import org.graalvm.compiler.graph.Node; import org.graalvm.compiler.lir.LIRFrameState; import 
org.graalvm.compiler.lir.LabelRef; import org.graalvm.compiler.lir.Variable; @@ -58,26 +57,33 @@ import org.graalvm.compiler.nodes.calc.IntegerLessThanNode; import org.graalvm.compiler.nodes.calc.LeftShiftNode; import org.graalvm.compiler.nodes.calc.MulNode; +import org.graalvm.compiler.nodes.calc.NarrowNode; +import org.graalvm.compiler.nodes.calc.NegateNode; import org.graalvm.compiler.nodes.calc.NotNode; import org.graalvm.compiler.nodes.calc.OrNode; import org.graalvm.compiler.nodes.calc.RightShiftNode; import org.graalvm.compiler.nodes.calc.SubNode; +import org.graalvm.compiler.nodes.calc.UnaryNode; import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode; import org.graalvm.compiler.nodes.calc.XorNode; import org.graalvm.compiler.nodes.memory.Access; public class AArch64NodeMatchRules extends NodeMatchRules { - private static final EconomicMap, AArch64ArithmeticOp> nodeOpMap; + private static final EconomicMap, AArch64ArithmeticOp> binaryOpMap; private static final EconomicMap, AArch64BitFieldOp.BitFieldOpCode> bitFieldOpMap; private static final EconomicMap, AArch64MacroAssembler.ShiftType> shiftTypeMap; static { - nodeOpMap = EconomicMap.create(Equivalence.IDENTITY, 5); - nodeOpMap.put(AddNode.class, AArch64ArithmeticOp.ADD); - nodeOpMap.put(SubNode.class, AArch64ArithmeticOp.SUB); - nodeOpMap.put(AndNode.class, AArch64ArithmeticOp.AND); - nodeOpMap.put(OrNode.class, AArch64ArithmeticOp.OR); - nodeOpMap.put(XorNode.class, AArch64ArithmeticOp.XOR); + binaryOpMap = EconomicMap.create(Equivalence.IDENTITY, 9); + binaryOpMap.put(AddNode.class, AArch64ArithmeticOp.ADD); + binaryOpMap.put(SubNode.class, AArch64ArithmeticOp.SUB); + binaryOpMap.put(MulNode.class, AArch64ArithmeticOp.MUL); + binaryOpMap.put(AndNode.class, AArch64ArithmeticOp.AND); + binaryOpMap.put(OrNode.class, AArch64ArithmeticOp.OR); + binaryOpMap.put(XorNode.class, AArch64ArithmeticOp.XOR); + binaryOpMap.put(LeftShiftNode.class, AArch64ArithmeticOp.SHL); + binaryOpMap.put(RightShiftNode.class, AArch64ArithmeticOp.ASHR); + binaryOpMap.put(UnsignedRightShiftNode.class, AArch64ArithmeticOp.LSHR); bitFieldOpMap = EconomicMap.create(Equivalence.IDENTITY, 2); bitFieldOpMap.put(UnsignedRightShiftNode.class, AArch64BitFieldOp.BitFieldOpCode.UBFX); @@ -153,6 +159,10 @@ }; } + private static boolean isNarrowingLongToInt(NarrowNode narrow) { + return narrow.getInputBits() == 64 && narrow.getResultBits() == 32; + } + @MatchRule("(And (UnsignedRightShift=shift a Constant=b) Constant=c)") @MatchRule("(LeftShift=shift (And a Constant=c) Constant=b)") public ComplexMatchResult unsignedBitField(BinaryNode shift, ValueNode a, ConstantNode b, ConstantNode c) { @@ -194,7 +204,7 @@ @MatchRule("(Sub=binary a (RightShift=shift b Constant))") @MatchRule("(Sub=binary a (UnsignedRightShift=shift b Constant))") public ComplexMatchResult addSubShift(BinaryNode binary, ValueNode a, BinaryNode shift) { - AArch64ArithmeticOp op = nodeOpMap.get(binary.getClass()); + AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass()); assert op != null; return emitBinaryShift(op, a, shift, false); } @@ -218,7 +228,7 @@ @MatchRule("(Xor=binary a (Not (RightShift=shift b Constant)))") @MatchRule("(Xor=binary a (Not (UnsignedRightShift=shift b Constant)))") public ComplexMatchResult logicShift(BinaryNode binary, ValueNode a, BinaryNode shift) { - AArch64ArithmeticOp op = nodeOpMap.get(binary.getClass()); + AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass()); assert op != null; ValueNode operand = binary.getX() == a ? 
binary.getY() : binary.getX(); boolean isShiftNot = operand instanceof NotNode; @@ -252,6 +262,75 @@ resultKind, AArch64ArithmeticOp.SMULL, true, operand(a), operand(b)); } + @MatchRule("(Add=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(Sub=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(Mul=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(And=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(Or=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(Xor=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(LeftShift=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(RightShift=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(UnsignedRightShift=binary (Narrow=narrow a) (Narrow b))") + @MatchRule("(Add=binary a (Narrow=narrow b))") + @MatchRule("(Sub=binary a (Narrow=narrow b))") + @MatchRule("(Mul=binary a (Narrow=narrow b))") + @MatchRule("(And=binary a (Narrow=narrow b))") + @MatchRule("(Or=binary a (Narrow=narrow b))") + @MatchRule("(Xor=binary a (Narrow=narrow b))") + @MatchRule("(LeftShift=binary a (Narrow=narrow b))") + @MatchRule("(RightShift=binary a (Narrow=narrow b))") + @MatchRule("(UnsignedRightShift=binary a (Narrow=narrow b))") + @MatchRule("(Sub=binary (Narrow=narrow a) b)") + @MatchRule("(LeftShift=binary (Narrow=narrow a) b)") + @MatchRule("(RightShift=binary (Narrow=narrow a) b)") + @MatchRule("(UnsignedRightShift=binary (Narrow=narrow a) b)") + public ComplexMatchResult elideL2IForBinary(BinaryNode binary, NarrowNode narrow) { + assert binary.getStackKind().isNumericInteger(); + + ValueNode a = narrow; + ValueNode b = binary.getX() == narrow ? binary.getY() : binary.getX(); + boolean isL2Ia = isNarrowingLongToInt((NarrowNode) a); + boolean isL2Ib = (b instanceof NarrowNode) && isNarrowingLongToInt((NarrowNode) b); + if (!isL2Ia && !isL2Ib) { + return null; + } + // Get the value of L2I NarrowNode as the src value. + ValueNode src1 = isL2Ia ? ((NarrowNode) a).getValue() : a; + ValueNode src2 = isL2Ib ? ((NarrowNode) b).getValue() : b; + + AArch64ArithmeticOp op = binaryOpMap.get(binary.getClass()); + assert op != null; + boolean commutative = binary.getNodeClass().isCommutative(); + LIRKind resultKind = LIRKind.fromJavaKind(gen.target().arch, binary.getStackKind()); + + // Must keep the right operator order for un-commutative binary operations. + if (a == binary.getX()) { + return builder -> getArithmeticLIRGenerator().emitBinary( + resultKind, op, commutative, operand(src1), operand(src2)); + } + return builder -> getArithmeticLIRGenerator().emitBinary( + resultKind, op, commutative, operand(src2), operand(src1)); + } + + @MatchRule("(Negate=unary (Narrow=narrow value))") + @MatchRule("(Not=unary (Narrow=narrow value))") + public ComplexMatchResult elideL2IForUnary(UnaryNode unary, NarrowNode narrow) { + assert unary.getStackKind().isNumericInteger(); + if (!isNarrowingLongToInt(narrow)) { + return null; + } + + AArch64ArithmeticOp op = unary instanceof NegateNode ? 
AArch64ArithmeticOp.NEG + : AArch64ArithmeticOp.NOT; + return builder -> { + AllocatableValue input = gen.asAllocatable(operand(narrow.getValue())); + LIRKind resultKind = LIRKind.fromJavaKind(gen.target().arch, unary.getStackKind()); + Variable result = gen.newVariable(resultKind); + gen.append(new AArch64ArithmeticOp.UnaryOp(op, result, moveSp(input))); + return result; + }; + } + @MatchRule("(Mul (Negate a) b)") @MatchRule("(Negate (Mul a b))") public ComplexMatchResult multiplyNegate(ValueNode a, ValueNode b) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/spi/ForeignCallLinkage.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/spi/ForeignCallLinkage.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/spi/ForeignCallLinkage.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,4 +73,13 @@ * the VM to be able to inspect the thread's execution state. */ boolean needsDebugInfo(); + + /** + * Returns true if further cleanup on the float registers is needed after performing the foreign + * call. This is critical on AMD64 as there is a performance penalty switching between legacy + * SSE and AVX instruction while the upper halves of the xmm registers are not zero. + */ + default boolean needsClearUpperVectorRegisters() { + return false; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/HashMapGetTest.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/HashMapGetTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/HashMapGetTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
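The new ForeignCallLinkage.needsClearUpperVectorRegisters() hook above defaults to false. Operations and linkages that may leave the upper halves of the xmm registers dirty override it, and the AMD64 return sequence (see the AMD64HotSpotReturnOp hunk further down) consults crb.needsClearUpperVectorRegisters() before emitting vzeroupper, avoiding the legacy-SSE/AVX transition penalty the javadoc mentions. A hedged sketch of an operation opting in, shown as a bare override with the surrounding class omitted:

    // Illustrative only: a LIR op that emits AVX code over the full vector registers opts in,
    // so the epilogue clears the upper halves with vzeroupper before returning.
    @Override
    public boolean needsClearUpperVectorRegisters() {
        return true;
    }

The array intrinsics later in this patch (AMD64ArrayCompareToOp, AMD64ArrayEqualsOp, AMD64ArrayIndexOfOp) take exactly this form, and HotSpotForeignCallLinkageImpl answers isCompiledStub() && mayContainFP().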
* * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ for (IfNode ifNode : lastCompiledGraph.getNodes(IfNode.TYPE)) { LogicNode condition = ifNode.condition(); if (ifNode.getTrueSuccessorProbability() < 0.4 && condition instanceof ObjectEqualsNode) { - assertTrue(ifNode.trueSuccessor().next() instanceof ReturnNode, "Expected return.", ifNode.trueSuccessor(), ifNode.trueSuccessor().next()); + assertTrue(ifNode.trueSuccessor().next() instanceof ReturnNode, "Expected return but got %s (trueSuccessor: %s)", ifNode.trueSuccessor().next(), ifNode.trueSuccessor()); } } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnsafeVirtualizationTest.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnsafeVirtualizationTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnsafeVirtualizationTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -24,8 +24,7 @@ package org.graalvm.compiler.core.test; -import java.lang.reflect.Field; - +import org.graalvm.compiler.core.test.ea.EATestBase.TestClassInt; import org.graalvm.compiler.nodes.StructuredGraph; import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions; import org.graalvm.compiler.nodes.spi.CoreProviders; @@ -39,75 +38,45 @@ public class UnsafeVirtualizationTest extends GraalCompilerTest { - public static class Base { - /* - * This padding ensure that the size of the Base class ends up as a multiple of 8, which - * makes the first field of the subclass 8-byte aligned. - */ - double padding; - } - - public static class A extends Base { - int f1; - int f2; - } - - private static final long AF1Offset; - private static final long AF2Offset; - static { - long o1 = -1; - long o2 = -1; - try { - Field f1 = A.class.getDeclaredField("f1"); - Field f2 = A.class.getDeclaredField("f2"); - o1 = UNSAFE.objectFieldOffset(f1); - o2 = UNSAFE.objectFieldOffset(f2); - } catch (NoSuchFieldException | SecurityException e) { - throw new AssertionError(e); - } - AF1Offset = o1; - AF2Offset = o2; - } - public static int unsafeSnippet1(double i1) { - A a = new A(); - UNSAFE.putDouble(a, AF1Offset, i1); - return UNSAFE.getInt(a, AF1Offset) + UNSAFE.getInt(a, AF2Offset); + TestClassInt a = new TestClassInt(); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + return UNSAFE.getInt(a, TestClassInt.fieldOffset1) + UNSAFE.getInt(a, TestClassInt.fieldOffset2); } public static long unsafeSnippet2a(int i1) { - A a = new A(); - UNSAFE.putDouble(a, AF1Offset, i1); - a.f1 = i1; - return UNSAFE.getLong(a, AF1Offset); + TestClassInt a = new TestClassInt(); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + a.setFirstField(i1); + return UNSAFE.getLong(a, TestClassInt.fieldOffset1); } public static long unsafeSnippet2b(int i1) { - A a = new A(); - UNSAFE.putDouble(a, AF1Offset, i1); - a.f2 = i1; - return UNSAFE.getLong(a, AF1Offset); + TestClassInt a = new TestClassInt(); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + a.setSecondField(i1); + return UNSAFE.getLong(a, TestClassInt.fieldOffset1); } public static long unsafeSnippet3a(int i1) { - A a = new A(); - UNSAFE.putDouble(a, AF1Offset, i1); - UNSAFE.putInt(a, AF1Offset, i1); - return UNSAFE.getLong(a, AF1Offset); + TestClassInt a = new TestClassInt(); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + 
UNSAFE.putInt(a, TestClassInt.fieldOffset1, i1); + return UNSAFE.getLong(a, TestClassInt.fieldOffset1); } public static long unsafeSnippet3b(int i1) { - A a = new A(); - UNSAFE.putDouble(a, AF1Offset, i1); - UNSAFE.putInt(a, AF2Offset, i1); - return UNSAFE.getLong(a, AF1Offset); + TestClassInt a = new TestClassInt(); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + UNSAFE.putInt(a, TestClassInt.fieldOffset2, i1); + return UNSAFE.getLong(a, TestClassInt.fieldOffset1); } public static int unsafeSnippet4(double i1) { - A a = new A(); - UNSAFE.putDouble(a, AF1Offset, i1); - UNSAFE.putDouble(a, AF1Offset, i1); - return UNSAFE.getInt(a, AF1Offset) + UNSAFE.getInt(a, AF2Offset); + TestClassInt a = new TestClassInt(); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + UNSAFE.putDouble(a, TestClassInt.fieldOffset1, i1); + return UNSAFE.getInt(a, TestClassInt.fieldOffset1) + UNSAFE.getInt(a, TestClassInt.fieldOffset2); } @Test @@ -141,7 +110,7 @@ } public void testPartialEscapeReadElimination(String snippet, boolean canonicalizeBefore, Object... args) { - assert AF1Offset % 8 == 0 : "First of the two int-fields must be 8-byte aligned"; + assert TestClassInt.fieldOffset1 % 8 == 0 : "First of the two int-fields must be 8-byte aligned"; ResolvedJavaMethod method = getResolvedJavaMethod(snippet); StructuredGraph graph = parseEager(snippet, AllowAssumptions.NO); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/EATestBase.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/EATestBase.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/EATestBase.java Mon Nov 18 12:40:06 2019 -0500 @@ -87,6 +87,46 @@ public int hashCode() { return x + 13 * y; } + + public static final long fieldOffset1; + public static final long fieldOffset2; + public static final boolean firstFieldIsX; + + static { + try { + long localFieldOffset1 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("x")); + // Make the fields 8 byte aligned (Required for testing setLong on Architectures + // which does not support unaligned memory access + if (localFieldOffset1 % 8 == 0) { + fieldOffset1 = localFieldOffset1; + fieldOffset2 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("y")); + firstFieldIsX = true; + } else { + fieldOffset1 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("y")); + fieldOffset2 = UNSAFE.objectFieldOffset(EATestBase.TestClassInt.class.getField("z")); + firstFieldIsX = false; + } + assert fieldOffset2 == fieldOffset1 + 4; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void setFirstField(int v) { + if (firstFieldIsX) { + x = v; + } else { + y = v; + } + } + + public void setSecondField(int v) { + if (firstFieldIsX) { + y = v; + } else { + z = v; + } + } } public static class TestClassObject { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/UnsafeEATest.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/UnsafeEATest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ea/UnsafeEATest.java Mon Nov 18 
12:40:06 2019 -0500 @@ -48,27 +48,6 @@ public static int zero = 0; - private static final long fieldOffset1; - private static final long fieldOffset2; - - static { - try { - long localFieldOffset1 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("x")); - // Make the fields 8 byte aligned (Required for testing setLong on Architectures which - // does not support unaligned memory access - if (localFieldOffset1 % 8 == 0) { - fieldOffset1 = localFieldOffset1; - fieldOffset2 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("y")); - } else { - fieldOffset1 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("y")); - fieldOffset2 = UNSAFE.objectFieldOffset(TestClassInt.class.getField("z")); - } - assert fieldOffset2 == fieldOffset1 + 4; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - @Override protected void testEscapeAnalysis(String snippet, JavaConstant expectedConstantResult, boolean iterativeEscapeAnalysis) { // Exercise both a graph containing UnsafeAccessNodes and one which has been possibly been @@ -134,8 +113,8 @@ public static int testSimpleIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putInt(x, fieldOffset1, 101); - return UNSAFE.getInt(x, fieldOffset1); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 101); + return UNSAFE.getInt(x, TestClassInt.fieldOffset1); } @Test @@ -145,7 +124,7 @@ public static TestClassInt testMaterializedIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putInt(x, fieldOffset1, 101); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 101); return x; } @@ -156,8 +135,8 @@ public static double testSimpleDoubleSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10.1); - return UNSAFE.getDouble(x, fieldOffset1); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1); + return UNSAFE.getDouble(x, TestClassInt.fieldOffset1); } @Test @@ -167,9 +146,9 @@ public static int testSimpleDoubleOverwriteWithIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10.1); - UNSAFE.putInt(x, fieldOffset1, 10); - return UNSAFE.getInt(x, fieldOffset1); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 10); + return UNSAFE.getInt(x, TestClassInt.fieldOffset1); } @Test @@ -183,9 +162,9 @@ public static int testSimpleDoubleOverwriteWithSecondIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10.1); - UNSAFE.putInt(x, fieldOffset1, 10); - return UNSAFE.getInt(x, fieldOffset2); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 10); + return UNSAFE.getInt(x, TestClassInt.fieldOffset2); } @Test @@ -199,9 +178,9 @@ public static int testSimpleDoubleOverwriteWithFirstIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10.1); - UNSAFE.putInt(x, fieldOffset2, 10); - return UNSAFE.getInt(x, fieldOffset1); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1); + UNSAFE.putInt(x, TestClassInt.fieldOffset2, 10); + return UNSAFE.getInt(x, TestClassInt.fieldOffset1); } @Test @@ -215,9 +194,9 @@ public static int testSimpleLongOverwriteWithSecondIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putLong(x, fieldOffset1, 0x1122334455667788L); - UNSAFE.putInt(x, fieldOffset1, 10); - return UNSAFE.getInt(x, fieldOffset2); + UNSAFE.putLong(x, TestClassInt.fieldOffset1, 0x1122334455667788L); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 10); + return UNSAFE.getInt(x, TestClassInt.fieldOffset2); } @Test 
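The TestClassInt additions above choose fieldOffset1 and fieldOffset2 so that the 8-byte Unsafe accesses exercised by these tests never straddle an unaligned boundary. A worked example with hypothetical field offsets (real values depend on the VM's field layout):

    // Hypothetical layout: x at offset 12, y at 16, z at 20.
    static long[] chooseAlignedOffsets(long xOffset, long yOffset, long zOffset) {
        boolean firstFieldIsX = xOffset % 8 == 0;               // 12 % 8 == 4, so false here
        long fieldOffset1 = firstFieldIsX ? xOffset : yOffset;  // 16
        long fieldOffset2 = firstFieldIsX ? yOffset : zOffset;  // 20 == fieldOffset1 + 4
        // A putLong/putDouble at fieldOffset1 is now 8-byte aligned and covers exactly
        // the two adjacent int fields, which is what the snippets rely on.
        return new long[] {fieldOffset1, fieldOffset2};
    }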
@@ -231,9 +210,9 @@ public static int testSimpleLongOverwriteWithFirstIntSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putLong(x, fieldOffset1, 0x1122334455667788L); - UNSAFE.putInt(x, fieldOffset2, 10); - return UNSAFE.getInt(x, fieldOffset1); + UNSAFE.putLong(x, TestClassInt.fieldOffset1, 0x1122334455667788L); + UNSAFE.putInt(x, TestClassInt.fieldOffset2, 10); + return UNSAFE.getInt(x, TestClassInt.fieldOffset1); } @Test @@ -250,12 +229,12 @@ TestClassInt x; if (a) { x = new TestClassInt(0, 0); - UNSAFE.putDouble(x, fieldOffset1, doubleField); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, doubleField); } else { x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, doubleField2); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, doubleField2); } - return UNSAFE.getDouble(x, fieldOffset1); + return UNSAFE.getDouble(x, TestClassInt.fieldOffset1); } static class ExtendedTestClassInt extends TestClassInt { @@ -271,14 +250,14 @@ TestClassInt x; if (value == 1) { x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10); } else { x = new TestClassInt(); - UNSAFE.putInt(x, fieldOffset1, 0); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 0); } - UNSAFE.putInt(x, fieldOffset1, 0); + UNSAFE.putInt(x, TestClassInt.fieldOffset1, 0); if (value == 2) { - UNSAFE.putInt(x, fieldOffset2, 0); + UNSAFE.putInt(x, TestClassInt.fieldOffset2, 0); } GraalDirectives.deoptimizeAndInvalidate(); return x; @@ -291,7 +270,7 @@ public static TestClassInt testMaterializedDoubleSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10.1); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.1); return x; } @@ -305,10 +284,10 @@ public static TestClassInt testDeoptDoubleVarSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, doubleField); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, doubleField); doubleField2 = 123; try { - doubleField = ((int) UNSAFE.getDouble(x, fieldOffset1)) / zero; + doubleField = ((int) UNSAFE.getDouble(x, TestClassInt.fieldOffset1)) / zero; } catch (RuntimeException e) { return x; } @@ -322,10 +301,10 @@ public static TestClassInt testDeoptDoubleConstantSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putDouble(x, fieldOffset1, 10.123); + UNSAFE.putDouble(x, TestClassInt.fieldOffset1, 10.123); doubleField2 = 123; try { - doubleField = ((int) UNSAFE.getDouble(x, fieldOffset1)) / zero; + doubleField = ((int) UNSAFE.getDouble(x, TestClassInt.fieldOffset1)) / zero; } catch (RuntimeException e) { return x; } @@ -342,10 +321,10 @@ public static TestClassInt testDeoptLongVarSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putLong(x, fieldOffset1, longField); + UNSAFE.putLong(x, TestClassInt.fieldOffset1, longField); longField2 = 123; try { - longField = UNSAFE.getLong(x, fieldOffset1) / zero; + longField = UNSAFE.getLong(x, TestClassInt.fieldOffset1) / zero; } catch (RuntimeException e) { return x; } @@ -359,10 +338,10 @@ public static TestClassInt testDeoptLongConstantSnippet() { TestClassInt x = new TestClassInt(); - UNSAFE.putLong(x, fieldOffset1, 0x2222222210123L); + UNSAFE.putLong(x, TestClassInt.fieldOffset1, 0x2222222210123L); longField2 = 123; try { - longField = UNSAFE.getLong(x, fieldOffset1) / zero; + longField = UNSAFE.getLong(x, TestClassInt.fieldOffset1) / zero; } catch (RuntimeException e) { return x; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd 
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/CompilationWrapper.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/CompilationWrapper.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/CompilationWrapper.java Mon Nov 18 12:40:06 2019 -0500 @@ -243,6 +243,9 @@ String message; ByteArrayOutputStream baos = new ByteArrayOutputStream(); try (PrintStream ps = new PrintStream(baos)) { + // This output is used by external tools to detect compilation failures. + ps.println("[[[Graal compilation failure]]]"); + ps.printf("%s: Compilation of %s failed:%n", Thread.currentThread(), this); cause.printStackTrace(ps); ps.printf("To disable compilation failure notifications, set %s to %s (e.g., -Dgraal.%s=%s).%n", diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/GraalError.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/GraalError.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/GraalError.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -169,6 +169,14 @@ } /** + * This constructor creates a {@link GraalError} for a given causing Throwable instance with + * detailed error message. + */ + public GraalError(Throwable cause, String msg, Object... args) { + super(format(msg, args), cause); + } + + /** * This constructor creates a {@link GraalError} and adds all the * {@linkplain #addContext(String) context} of another {@link GraalError}. * diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotBackend.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotBackend.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotBackend.java Mon Nov 18 12:40:06 2019 -0500 @@ -26,6 +26,7 @@ import static jdk.vm.ci.amd64.AMD64.r10; import static jdk.vm.ci.amd64.AMD64.rax; +import static jdk.vm.ci.amd64.AMD64.rbp; import static jdk.vm.ci.amd64.AMD64.rsp; import static jdk.vm.ci.code.ValueUtil.asRegister; import static org.graalvm.compiler.core.common.GraalOptions.CanOmitFrame; @@ -93,7 +94,7 @@ @Override protected FrameMapBuilder newFrameMapBuilder(RegisterConfig registerConfig) { RegisterConfig registerConfigNonNull = registerConfig == null ? 
getCodeCache().getRegisterConfig() : registerConfig; - FrameMap frameMap = new AMD64FrameMap(getCodeCache(), registerConfigNonNull, this); + FrameMap frameMap = new AMD64FrameMap(getCodeCache(), registerConfigNonNull, this, config.preserveFramePointer); return new AMD64FrameMapBuilder(frameMap, getCodeCache(), registerConfigNonNull); } @@ -130,10 +131,12 @@ final boolean isStub; final boolean omitFrame; + final boolean useStandardFrameProlog; - HotSpotFrameContext(boolean isStub, boolean omitFrame) { + HotSpotFrameContext(boolean isStub, boolean omitFrame, boolean useStandardFrameProlog) { this.isStub = isStub; this.omitFrame = omitFrame; + this.useStandardFrameProlog = useStandardFrameProlog; } @Override @@ -157,6 +160,11 @@ // assert asm.position() - verifiedEntryPointOffset >= // PATCHED_VERIFIED_ENTRY_POINT_INSTRUCTION_SIZE; } + if (useStandardFrameProlog) { + // Stack-walking friendly instructions + asm.push(rbp); + asm.movq(rbp, rsp); + } if (!isStub && asm.position() == verifiedEntryPointOffset) { asm.subqWide(rsp, frameSize); assert asm.position() - verifiedEntryPointOffset >= PATCHED_VERIFIED_ENTRY_POINT_INSTRUCTION_SIZE; @@ -180,7 +188,12 @@ assert crb.frameMap.getRegisterConfig().getCalleeSaveRegisters() == null; int frameSize = crb.frameMap.frameSize(); - asm.incrementq(rsp, frameSize); + if (useStandardFrameProlog) { + asm.movq(rsp, rbp); + asm.pop(rbp); + } else { + asm.incrementq(rsp, frameSize); + } } } } @@ -202,7 +215,7 @@ Stub stub = gen.getStub(); Assembler masm = new AMD64MacroAssembler(getTarget()); - HotSpotFrameContext frameContext = new HotSpotFrameContext(stub != null, omitFrame); + HotSpotFrameContext frameContext = new HotSpotFrameContext(stub != null, omitFrame, config.preserveFramePointer); DataBuilder dataBuilder = new HotSpotDataBuilder(getCodeCache().getTarget()); CompilationResultBuilder crb = factory.createBuilder(getCodeCache(), getForeignCalls(), frameMap, masm, dataBuilder, frameContext, options, debug, compilationResult, Register.None); crb.setTotalFrameSize(frameMap.totalFrameSize()); @@ -330,7 +343,7 @@ @Override public RegisterAllocationConfig newRegisterAllocationConfig(RegisterConfig registerConfig, String[] allocationRestrictedTo) { RegisterConfig registerConfigNonNull = registerConfig == null ? getCodeCache().getRegisterConfig() : registerConfig; - return new AMD64HotSpotRegisterAllocationConfig(registerConfigNonNull, allocationRestrictedTo); + return new AMD64HotSpotRegisterAllocationConfig(registerConfigNonNull, allocationRestrictedTo, config.preserveFramePointer); } @Override diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotEpilogueBlockEndOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotEpilogueBlockEndOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotEpilogueBlockEndOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
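When config.preserveFramePointer is set (the new PreserveFramePointer flag read in GraalHotSpotVMConfig later in this patch), the HotSpotFrameContext above emits a conventional frame instead of the rsp-only scheme. Recapping the enter()/leave() shape from the hunk, not new behaviour:

    // enter(): stack-walking friendly prologue
    asm.push(rbp);
    asm.movq(rbp, rsp);
    asm.subqWide(rsp, frameSize);
    // leave(): tear the frame down through the frame pointer instead of re-adding frameSize
    asm.movq(rsp, rbp);
    asm.pop(rbp);

This keeps compiled frames unwindable through the rbp chain, which is the point of the "Stack-walking friendly instructions" comment in the prologue.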
* * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ package org.graalvm.compiler.hotspot.amd64; +import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL; import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG; import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK; @@ -33,6 +34,7 @@ import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.meta.AllocatableValue; +import jdk.vm.ci.meta.Value; /** * @see AMD64HotSpotEpilogueOp @@ -43,7 +45,7 @@ super(c); } - @Use({REG, STACK}) protected AllocatableValue savedRbp = PLACEHOLDER; + @Use({REG, STACK, ILLEGAL}) protected AllocatableValue savedRbp = Value.ILLEGAL; protected void leaveFrameAndRestoreRbp(CompilationResultBuilder crb, AMD64MacroAssembler masm) { AMD64HotSpotEpilogueOp.leaveFrameAndRestoreRbp(savedRbp, crb, masm); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotEpilogueOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotEpilogueOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotEpilogueOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,20 +24,23 @@ package org.graalvm.compiler.hotspot.amd64; -import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG; -import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK; import static jdk.vm.ci.amd64.AMD64.rbp; import static jdk.vm.ci.code.ValueUtil.asRegister; import static jdk.vm.ci.code.ValueUtil.isStackSlot; +import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL; +import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG; +import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK; import org.graalvm.compiler.asm.amd64.AMD64Address; import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler; import org.graalvm.compiler.lir.LIRInstructionClass; +import org.graalvm.compiler.lir.amd64.AMD64FrameMap; import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction; import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.code.Register; import jdk.vm.ci.meta.AllocatableValue; +import jdk.vm.ci.meta.Value; /** * Superclass for operations that use the value of RBP saved in a method's prologue. @@ -48,14 +51,17 @@ super(c); } - @Use({REG, STACK}) private AllocatableValue savedRbp = PLACEHOLDER; + @Use({REG, STACK, ILLEGAL}) private AllocatableValue savedRbp = Value.ILLEGAL; protected void leaveFrameAndRestoreRbp(CompilationResultBuilder crb, AMD64MacroAssembler masm) { leaveFrameAndRestoreRbp(savedRbp, crb, masm); } static void leaveFrameAndRestoreRbp(AllocatableValue savedRbp, CompilationResultBuilder crb, AMD64MacroAssembler masm) { - if (isStackSlot(savedRbp)) { + if (Value.ILLEGAL.equals(savedRbp)) { + // RBP will be restored in FrameContext.leave(..). Nothing to do here. 
+ assert ((AMD64FrameMap) crb.frameMap).useStandardFrameProlog() : "savedRbp is not initialized."; + } else if (isStackSlot(savedRbp)) { // Restoring RBP from the stack must be done before the frame is removed masm.movq(rbp, (AMD64Address) crb.asAddress(savedRbp)); } else { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java Mon Nov 18 12:40:06 2019 -0500 @@ -153,7 +153,7 @@ SaveRbp(NoOp placeholder) { this.placeholder = placeholder; AMD64FrameMapBuilder frameMapBuilder = (AMD64FrameMapBuilder) getResult().getFrameMapBuilder(); - this.reservedSlot = frameMapBuilder.allocateRBPSpillSlot(); + this.reservedSlot = config.preserveFramePointer ? null : frameMapBuilder.allocateRBPSpillSlot(); } /** @@ -162,6 +162,7 @@ * @param useStack specifies if rbp must be saved to the stack */ public AllocatableValue finalize(boolean useStack) { + assert !config.preserveFramePointer : "rbp has been pushed onto the stack"; AllocatableValue dst; if (useStack) { dst = reservedSlot; @@ -173,6 +174,10 @@ placeholder.replace(getResult().getLIR(), new MoveFromRegOp(AMD64Kind.QWORD, dst, rbp.asValue(LIRKind.value(AMD64Kind.QWORD)))); return dst; } + + public void remove() { + placeholder.remove(getResult().getLIR()); + } } private SaveRbp saveRbp; @@ -183,10 +188,6 @@ saveRbp = new SaveRbp(placeholder); } - protected SaveRbp getSaveRbp() { - return saveRbp; - } - /** * Helper instruction to reserve a stack slot for the whole method. Note that the actual users * of the stack slot might be inserted after stack slot allocation. 
This dummy instruction @@ -547,16 +548,21 @@ public void beforeRegisterAllocation() { super.beforeRegisterAllocation(); boolean hasDebugInfo = getResult().getLIR().hasDebugInfo(); - AllocatableValue savedRbp = saveRbp.finalize(hasDebugInfo); + + if (config.preserveFramePointer) { + saveRbp.remove(); + } else { + AllocatableValue savedRbp = saveRbp.finalize(hasDebugInfo); + for (AMD64HotSpotRestoreRbpOp op : epilogueOps) { + op.setSavedRbp(savedRbp); + } + } + if (hasDebugInfo) { getResult().setDeoptimizationRescueSlot(((AMD64FrameMapBuilder) getResult().getFrameMapBuilder()).allocateDeoptimizationRescueSlot()); } - getResult().setMaxInterpreterFrameSize(debugInfoBuilder.maxInterpreterFrameSize()); - for (AMD64HotSpotRestoreRbpOp op : epilogueOps) { - op.setSavedRbp(savedRbp); - } if (BenchmarkCounters.enabled) { // ensure that the rescue slot is available LIRInstruction op = getOrInitRescueSlotOp(); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotRegisterAllocationConfig.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotRegisterAllocationConfig.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotRegisterAllocationConfig.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,8 +83,11 @@ }; // @formatter:on - AMD64HotSpotRegisterAllocationConfig(RegisterConfig registerConfig, String[] allocationRestrictedTo) { + private final boolean useStandardFrameProlog; + + AMD64HotSpotRegisterAllocationConfig(RegisterConfig registerConfig, String[] allocationRestrictedTo, boolean useStandardFrameProlog) { super(registerConfig, allocationRestrictedTo); + this.useStandardFrameProlog = useStandardFrameProlog; } @Override @@ -93,6 +96,9 @@ for (Register reg : registers) { regMap.set(reg.number); } + if (useStandardFrameProlog) { + regMap.clear(rbp.number); + } ArrayList allocatableRegisters = new ArrayList<>(registers.size()); for (Register reg : registerAllocationOrder) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotRestoreRbpOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotRestoreRbpOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotRestoreRbpOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
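Because rbp permanently holds the frame pointer on this path, it also has to vanish from the register allocator's view; the allocation-config hunk above clears its bit before the allocatable set is built, and the LIR generator change above it drops the SaveRbp placeholder instead of turning it into a real spill. The same lines with an added explanatory comment (behaviour unchanged):

    if (useStandardFrameProlog) {
        // rbp is reserved for the frame-pointer chain, so the allocator must never hand it out;
        // clearing the bit removes it from the allocatable register set.
        regMap.clear(rbp.number);
    }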
* * This code is free software; you can redistribute it and/or modify it @@ -24,20 +24,9 @@ package org.graalvm.compiler.hotspot.amd64; -import org.graalvm.compiler.core.common.LIRKind; -import org.graalvm.compiler.lir.Variable; - -import jdk.vm.ci.amd64.AMD64Kind; import jdk.vm.ci.meta.AllocatableValue; public interface AMD64HotSpotRestoreRbpOp { - /** - * The type of location (i.e., stack or register) in which RBP is saved is not known until - * initial LIR generation is finished. Until then, we use a placeholder variable so that LIR - * verification is successful. - */ - Variable PLACEHOLDER = new Variable(LIRKind.value(AMD64Kind.QWORD), Integer.MAX_VALUE); - void setSavedRbp(AllocatableValue value); } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotReturnOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotReturnOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotReturnOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,10 +111,14 @@ * live value at this point should be the return value in either rax, or in xmm0 with * the upper half of the register unused, so we don't destroy any value here. */ - if (masm.supports(CPUFeature.AVX)) { + if (masm.supports(CPUFeature.AVX) && crb.needsClearUpperVectorRegisters()) { + // If we decide to perform vzeroupper also for stubs (like what JDK9+ C2 does for + // intrinsics that employ AVX2 instruction), we need to be careful that it kills all + // the xmm registers (at least the upper halves). masm.vzeroupper(); } } masm.ret(0); } + } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java Mon Nov 18 12:40:06 2019 -0500 @@ -158,11 +158,15 @@ private static Collection add(Collection c, String... 
elements) { String[] sorted = elements.clone(); Arrays.sort(sorted); - for (int i = 0; i < elements.length; i++) { - if (!elements[i].equals(sorted[i])) { - // Let's keep the list sorted for easier visual inspection - fail("Element %d is out of order, \"%s\"", i, elements[i]); + if (!Arrays.equals(elements, sorted)) { + int width = 2 + Arrays.asList(elements).stream().map(String::length).reduce(0, Integer::max); + Formatter fmt = new Formatter(); + fmt.format("%-" + width + "s | sorted%n", "original"); + fmt.format("%s%n", new String(new char[width * 2 + 2]).replace('\0', '=')); + for (int i = 0; i < elements.length; i++) { + fmt.format("%-" + width + "s | %s%n", elements[i], sorted[i]); } + fail("Elements not sorted alphabetically:%n%s", fmt); } c.addAll(Arrays.asList(elements)); return c; @@ -517,8 +521,8 @@ // AES intrinsics if (!config.useAESIntrinsics) { add(ignore, + "com/sun/crypto/provider/AESCrypt." + aesDecryptName + "([BI[BI)V", "com/sun/crypto/provider/AESCrypt." + aesEncryptName + "([BI[BI)V", - "com/sun/crypto/provider/AESCrypt." + aesDecryptName + "([BI[BI)V", "com/sun/crypto/provider/CipherBlockChaining." + cbcDecryptName + "([BII[BI)I", "com/sun/crypto/provider/CipherBlockChaining." + cbcEncryptName + "([BII[BI)I"); } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompilationWrapperTest.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompilationWrapperTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompilationWrapperTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -122,6 +122,12 @@ public void testVMCompilation3() throws IOException, InterruptedException { assumeManagementLibraryIsLoadable(); final int maxProblems = 2; + Probe failurePatternProbe = new Probe("[[[Graal compilation failure]]]", maxProblems) { + @Override + String test() { + return actualOccurrences > 0 && actualOccurrences <= maxProblems ? null : String.format("expected occurrences to be in [1 .. 
%d]", maxProblems); + } + }; Probe retryingProbe = new Probe("Retrying compilation of", maxProblems) { @Override String test() { @@ -140,6 +146,7 @@ } }; Probe[] probes = { + failurePatternProbe, retryingProbe, adjustmentProbe }; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorldTest.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorldTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorldTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -66,7 +66,7 @@ excludeMethodFilters, verbose, harnessOptions, - new OptionValues(initialOptions, HighTier.Options.Inline, false)); + new OptionValues(initialOptions, HighTier.Options.Inline, false, CompilationFailureAction, ExceptionAction.Silent)); ctw.compile(); assert CompilationBailoutAsFailure.getValue(initialOptions) == originalBailoutAction; assert CompilationFailureAction.getValue(initialOptions) == originalFailureAction; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Mon Nov 18 12:40:06 2019 -0500 @@ -111,6 +111,8 @@ public final boolean useVectorizedMismatchIntrinsic = getFlag("UseVectorizedMismatchIntrinsic", Boolean.class, false); public final boolean useFMAIntrinsics = getFlag("UseFMA", Boolean.class, false); + public final boolean preserveFramePointer = getFlag("PreserveFramePointer", Boolean.class, false); + /* * These are methods because in some JDKs the flags are visible but the stubs themselves haven't * been exported so we have to check both if the flag is on and if we have the stub. diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotForeignCallLinkageImpl.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotForeignCallLinkageImpl.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotForeignCallLinkageImpl.java Mon Nov 18 12:40:06 2019 -0500 @@ -304,4 +304,9 @@ public String getSymbol() { return stub == null ? 
null : stub.toString(); } + + @Override + public boolean needsClearUpperVectorRegisters() { + return isCompiledStub() && mayContainFP(); + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/JVMCIVersionCheck.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/JVMCIVersionCheck.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/JVMCIVersionCheck.java Mon Nov 18 12:40:06 2019 -0500 @@ -43,7 +43,7 @@ */ public final class JVMCIVersionCheck { - private static final Version JVMCI8_MIN_VERSION = new Version3(19, 3, 2); + private static final Version JVMCI_MIN_VERSION = new Version3(19, 3, 4); public interface Version { boolean isLessThan(Version other); @@ -145,7 +145,7 @@ } } - private static void failVersionCheck(Map props, boolean exit, String reason, Object... args) { + private void failVersionCheck(boolean exit, String reason, Object... args) { Formatter errorMessage = new Formatter().format(reason, args); String javaHome = props.get("java.home"); String vmName = props.get("java.vm.name"); @@ -153,10 +153,14 @@ errorMessage.format("this error or to \"warn\" to emit a warning and continue execution.%n"); errorMessage.format("Currently used Java home directory is %s.%n", javaHome); errorMessage.format("Currently used VM configuration is: %s%n", vmName); - if (props.get("java.specification.version").compareTo("1.9") < 0) { + if (javaSpecVersion.compareTo("1.9") < 0) { errorMessage.format("Download the latest JVMCI JDK 8 from https://github.com/graalvm/openjdk8-jvmci-builder/releases"); } else { - errorMessage.format("Download JDK 11 or later."); + if (javaSpecVersion.compareTo("11") == 0 && vmVersion.contains("-jvmci-")) { + errorMessage.format("Download the latest Labs OpenJDK 11 from https://github.com/graalvm/labs-openjdk-11/releases"); + } else { + errorMessage.format("Download JDK 11 or later."); + } } String value = System.getenv("JVMCI_VERSION_CHECK"); if ("warn".equals(value)) { @@ -183,7 +187,7 @@ static void check(Map props, boolean exitOnFailure) { JVMCIVersionCheck checker = new JVMCIVersionCheck(props, props.get("java.specification.version"), props.get("java.vm.version")); - checker.run(exitOnFailure, JVMCI8_MIN_VERSION); + checker.run(exitOnFailure, JVMCI_MIN_VERSION); } /** @@ -202,14 +206,14 @@ Version v = Version.parse(vmVersion); if (v != null) { if (v.isLessThan(minVersion)) { - failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion); + failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion); } return; } - failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" + + failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" + "Cannot read JVMCI version from java.vm.version property: %s.%n", vmVersion); } else if (javaSpecVersion.compareTo("11") < 0) { - failVersionCheck(props, exitOnFailure, "Graal is not compatible with the JVMCI API in JDK 9 and 10.%n"); + failVersionCheck(exitOnFailure, "Graal is not compatible with the JVMCI API in JDK 9 and 10.%n"); } else { if (vmVersion.contains("SNAPSHOT")) { return; @@ -218,28 +222,16 @@ // Allow local builds 
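The version-check changes above raise the minimum JVMCI version to 19.3.4 and, for a JDK 11 whose java.vm.version contains "-jvmci-", point users at Labs OpenJDK 11 builds. A hedged illustration of how a labsjdk version string flows through the check; the example string and its parse result are assumptions, since Version.parse is not shown in this hunk:

    // Assumed example: java.vm.version = "11.0.6+9-jvmci-19.3-b04" would parse to JVMCI
    // version (19, 3, 4), which is not less than JVMCI_MIN_VERSION = new Version3(19, 3, 4),
    // so the check passes; an older "-jvmci-19.3-b03" build would fail unless the
    // JVMCI_VERSION_CHECK=warn environment variable downgrades the failure to a warning.
    Version v = Version.parse(vmVersion);
    if (v != null && v.isLessThan(JVMCI_MIN_VERSION)) {
        failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, JVMCI_MIN_VERSION);
    }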
return; } - if (vmVersion.startsWith("11-ea+")) { - String buildString = vmVersion.substring("11-ea+".length()); - try { - int build = Integer.parseInt(buildString); - if (build < 20) { - failVersionCheck(props, exitOnFailure, "Graal requires build 20 or later of JDK 11 early access binary, got build %d.%n", build); - return; - } - } catch (NumberFormatException e) { - failVersionCheck(props, exitOnFailure, "Could not parse the JDK 11 early access build number from java.vm.version property: %s.%n", vmVersion); - return; - } - } else if (vmVersion.contains("-jvmci-")) { + if (vmVersion.contains("-jvmci-")) { // A "labsjdk" Version v = Version.parse(vmVersion); if (v != null) { if (v.isLessThan(minVersion)) { - failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion); + failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal: %s < %s.%n", v, minVersion); } return; } - failVersionCheck(props, exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" + + failVersionCheck(exitOnFailure, "The VM does not support the minimum JVMCI API version required by Graal.%n" + "Cannot read JVMCI version from java.vm.version property: %s.%n", vmVersion); } else { // Graal is compatible with all JDK versions as of 11 GA. diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java Mon Nov 18 12:40:06 2019 -0500 @@ -35,9 +35,11 @@ import java.lang.invoke.MutableCallSite; import java.lang.invoke.VolatileCallSite; import java.lang.reflect.Array; +import java.lang.reflect.Type; import java.math.BigInteger; import java.util.zip.CRC32; +import jdk.internal.vm.compiler.collections.Pair; import org.graalvm.compiler.api.replacements.SnippetReflectionProvider; import org.graalvm.compiler.core.common.spi.ForeignCallsProvider; import org.graalvm.compiler.core.common.type.ObjectStamp; @@ -115,6 +117,7 @@ import jdk.vm.ci.meta.JavaKind; import jdk.vm.ci.meta.MetaAccessProvider; import jdk.vm.ci.meta.ResolvedJavaMethod; +import jdk.vm.ci.services.Services; import sun.misc.Unsafe; /** @@ -461,21 +464,29 @@ } public static String lookupIntrinsicName(GraalHotSpotVMConfig config, String className, String name1, String name2) { + return selectIntrinsicName(config, className, name1, name2).getLeft(); + } + + /** + * Returns a pair of Strings where the left one represents the matched intrinsic name and the + * right one represents the mismatched intrinsic name. 
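The selectIntrinsicName helper documented here (its declaration follows just below) generalizes lookupIntrinsicName: rather than returning only the name found in the VM's intrinsic table, it returns both candidates so that registerAndCheckMismatch, defined further down, can report which name was expected when registration throws NoSuchMethodError. A short illustration using the CipherBlockChaining names from this patch; the concrete scenario is assumed:

    // On a JDK whose intrinsic table lists "implEncrypt" rather than "encrypt":
    Pair<String, String> cbcEncryptName =
            selectIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implEncrypt", "encrypt");
    String registered = cbcEncryptName.getLeft();          // "implEncrypt" is used for registration
    String reportedOnMismatch = cbcEncryptName.getRight(); // "encrypt" only appears in the GraalError message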
+ */ + public static Pair selectIntrinsicName(GraalHotSpotVMConfig config, String className, String name1, String name2) { boolean foundName1 = false; boolean foundName2 = false; - String name = name1; for (VMIntrinsicMethod intrinsic : config.getStore().getIntrinsics()) { if (className.equals(intrinsic.declaringClass)) { if (name1.equals(intrinsic.name)) { foundName1 = true; } else if (name2.equals(intrinsic.name)) { foundName2 = true; - name = name2; } } } - if (foundName1 != foundName2) { - return name; + if (foundName1 && !foundName2) { + return Pair.create(name1, name2); + } else if (foundName2 && !foundName1) { + return Pair.create(name2, name1); } throw GraalError.shouldNotReachHere(); } @@ -500,19 +511,41 @@ String arch = config.osArch; String decryptSuffix = arch.equals("sparc") ? "WithOriginalKey" : ""; - String cbcEncryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implEncrypt", "encrypt"); - String cbcDecryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implDecrypt", "decrypt"); Registration r = new Registration(plugins, "com.sun.crypto.provider.CipherBlockChaining", replacements); - r.registerMethodSubstitution(CipherBlockChainingSubstitutions.class, cbcEncryptName, Receiver.class, byte[].class, int.class, int.class, byte[].class, int.class); - r.registerMethodSubstitution(CipherBlockChainingSubstitutions.class, cbcDecryptName, cbcDecryptName + decryptSuffix, Receiver.class, byte[].class, int.class, int.class, byte[].class, - int.class); - String aesEncryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implEncryptBlock", "encryptBlock"); - String aesDecryptName = lookupIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implDecryptBlock", "decryptBlock"); + Pair cbcEncryptName = selectIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implEncrypt", "encrypt"); + registerAndCheckMismatch(r, CipherBlockChainingSubstitutions.class, cbcEncryptName, Receiver.class, byte[].class, int.class, int.class, + byte[].class, int.class); + + Pair cbcDecryptName = selectIntrinsicName(config, "com/sun/crypto/provider/CipherBlockChaining", "implDecrypt", "decrypt"); + registerAndCheckMismatch(r, CipherBlockChainingSubstitutions.class, cbcDecryptName, cbcDecryptName.getLeft() + decryptSuffix, Receiver.class, byte[].class, int.class, int.class, + byte[].class, int.class); r = new Registration(plugins, "com.sun.crypto.provider.AESCrypt", replacements); - r.registerMethodSubstitution(AESCryptSubstitutions.class, aesEncryptName, Receiver.class, byte[].class, int.class, byte[].class, int.class); - r.registerMethodSubstitution(AESCryptSubstitutions.class, aesDecryptName, aesDecryptName + decryptSuffix, Receiver.class, byte[].class, int.class, byte[].class, int.class); + + Pair aesEncryptName = selectIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implEncryptBlock", "encryptBlock"); + registerAndCheckMismatch(r, AESCryptSubstitutions.class, aesEncryptName, Receiver.class, byte[].class, int.class, byte[].class, int.class); + + Pair aesDecryptName = selectIntrinsicName(config, "com/sun/crypto/provider/AESCrypt", "implDecryptBlock", "decryptBlock"); + registerAndCheckMismatch(r, AESCryptSubstitutions.class, aesDecryptName, aesDecryptName.getLeft() + decryptSuffix, Receiver.class, byte[].class, int.class, byte[].class, int.class); + } + } + + private static void registerAndCheckMismatch(Registration r, Class substitutionClass, Pair intrinsicNames, Type... 
argumentTypes) { + try { + r.registerMethodSubstitution(substitutionClass, intrinsicNames.getLeft(), argumentTypes); + } catch (NoSuchMethodError e) { + throw new GraalError(e, "Found method named '%s' instead of '%s' in class '%s'. This is most likely because the JVMCI JDK in %s was built on an incompatible base JDK.", + intrinsicNames.getRight(), intrinsicNames.getLeft(), r.getDeclaringType().getTypeName(), Services.getSavedProperties().get("java.home")); + } + } + + private static void registerAndCheckMismatch(Registration r, Class substitutionClass, Pair intrinsicNames, String substituteName, Type... argumentTypes) { + try { + r.registerMethodSubstitution(substitutionClass, intrinsicNames.getLeft(), substituteName, argumentTypes); + } catch (NoSuchMethodError e) { + throw new GraalError(e, "Found method named '%s' instead of '%s' in class '%s'. This is most likely because the JVMCI JDK in %s was built on an incompatible base JDK.", + intrinsicNames.getRight(), intrinsicNames.getLeft(), r.getDeclaringType().getTypeName(), Services.getSavedProperties().get("java.home")); } } @@ -544,21 +577,21 @@ r.registerMethodSubstitution(DigestBaseSubstitutions.class, "implCompressMultiBlock0", Receiver.class, byte[].class, int.class, int.class); } - String implCompressName = lookupIntrinsicName(config, "sun/security/provider/SHA", "implCompress", "implCompress0"); + Pair implCompressName = selectIntrinsicName(config, "sun/security/provider/SHA", "implCompress", "implCompress0"); if (useSha1) { assert config.sha1ImplCompress != 0L; Registration r = new Registration(plugins, "sun.security.provider.SHA", replacements); - r.registerMethodSubstitution(SHASubstitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class); + registerAndCheckMismatch(r, SHASubstitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class); } if (useSha256) { assert config.sha256ImplCompress != 0L; Registration r = new Registration(plugins, "sun.security.provider.SHA2", replacements); - r.registerMethodSubstitution(SHA2Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class); + registerAndCheckMismatch(r, SHA2Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class); } if (useSha512) { assert config.sha512ImplCompress != 0L; Registration r = new Registration(plugins, "sun.security.provider.SHA5", replacements); - r.registerMethodSubstitution(SHA5Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class); + registerAndCheckMismatch(r, SHA5Substitutions.class, implCompressName, "implCompress0", Receiver.class, byte[].class, int.class); } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/Stub.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/Stub.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/Stub.java Mon Nov 18 12:40:06 2019 -0500 @@ -27,6 +27,7 @@ import static java.util.Collections.singletonList; import static org.graalvm.compiler.core.GraalCompiler.emitFrontEnd; import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; +import static org.graalvm.compiler.core.common.GraalOptions.RegisterPressure; import static 
org.graalvm.compiler.debug.DebugContext.DEFAULT_LOG_STREAM; import static org.graalvm.compiler.debug.DebugOptions.DebugStubsAndSnippets; import static org.graalvm.compiler.hotspot.HotSpotHostBackend.UNCOMMON_TRAP_HANDLER; @@ -76,7 +77,7 @@ /** * Base class for implementing some low level code providing the out-of-line slow path for a snippet - * and/or a callee saved call to a HotSpot C/C++ runtime function or even a another compiled Java + * and/or a callee saved call to a HotSpot C/C++ runtime function or even another compiled Java * method. */ public abstract class Stub { @@ -135,7 +136,9 @@ */ public Stub(OptionValues options, HotSpotProviders providers, HotSpotForeignCallLinkage linkage) { this.linkage = linkage; - this.options = new OptionValues(options, GraalOptions.TraceInlining, GraalOptions.TraceInliningForStubsAndSnippets.getValue(options)); + // The RegisterPressure flag can be ignored by a compilation that runs out of registers, so + // the stub compilation must ignore the flag so that all allocatable registers are saved. + this.options = new OptionValues(options, GraalOptions.TraceInlining, GraalOptions.TraceInliningForStubsAndSnippets.getValue(options), RegisterPressure, null); this.providers = providers; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayCompareToOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayCompareToOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayCompareToOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -579,4 +579,9 @@ masm.movzwl(elem2, new AMD64Address(str2, index, scale2, 0)); } } + + @Override + public boolean needsClearUpperVectorRegisters() { + return true; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayEqualsOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayEqualsOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayEqualsOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -864,4 +864,9 @@ throw new IllegalStateException(); } } + + @Override + public boolean needsClearUpperVectorRegisters() { + return true; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayIndexOfOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayIndexOfOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayIndexOfOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -644,4 +644,9 @@ private static boolean supports(LIRGeneratorTool tool, CPUFeature cpuFeature) { return ((AMD64) tool.target().arch).getFeatures().contains(cpuFeature); } + + @Override + public boolean needsClearUpperVectorRegisters() { + return true; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Call.java --- 
a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Call.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Call.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,6 +152,11 @@ public boolean destroysCallerSavedRegisters() { return callTarget.destroysRegisters(); } + + @Override + public boolean needsClearUpperVectorRegisters() { + return callTarget.needsClearUpperVectorRegisters(); + } } @Opcode("NEAR_FOREIGN_CALL") diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64FrameMap.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64FrameMap.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64FrameMap.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,17 +78,15 @@ */ public class AMD64FrameMap extends FrameMap { + private final boolean useStandardFrameProlog; private StackSlot rbpSpillSlot; - public AMD64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory) { - this(codeCache, registerConfig, referenceMapFactory, false); - } - - public AMD64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory, boolean useBasePointer) { + public AMD64FrameMap(CodeCacheProvider codeCache, RegisterConfig registerConfig, ReferenceMapBuilderFactory referenceMapFactory, boolean useStandardFrameProlog) { super(codeCache, registerConfig, referenceMapFactory); // (negative) offset relative to sp + total frame size - initialSpillSize = returnAddressSize() + (useBasePointer ? getTarget().arch.getWordSize() : 0); - spillSize = initialSpillSize; + this.useStandardFrameProlog = useStandardFrameProlog; + this.initialSpillSize = returnAddressSize() + (useStandardFrameProlog ? 
getTarget().arch.getWordSize() : 0); + this.spillSize = initialSpillSize; } @Override @@ -141,4 +139,8 @@ spillSlotSize(LIRKind.value(AMD64Kind.QWORD)) : "Deoptimization rescue slot must be the first or second (if there is an RBP spill slot) stack slot"; return allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD)); } + + public boolean useStandardFrameProlog() { + return useStandardFrameProlog; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64StringLatin1InflateOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64StringLatin1InflateOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64StringLatin1InflateOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -275,4 +275,8 @@ masm.bind(labelDone); } + @Override + public boolean needsClearUpperVectorRegisters() { + return true; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64StringUTF16CompressOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64StringUTF16CompressOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64StringUTF16CompressOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -340,4 +340,8 @@ masm.bind(labelDone); } + @Override + public boolean needsClearUpperVectorRegisters() { + return true; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Ternary.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Ternary.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Ternary.java Mon Nov 18 12:40:06 2019 -0500 @@ -37,6 +37,7 @@ import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; import org.graalvm.compiler.lir.LIRInstructionClass; import org.graalvm.compiler.lir.Opcode; +import org.graalvm.compiler.lir.amd64.vector.AMD64VectorInstruction; import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.meta.AllocatableValue; @@ -49,11 +50,10 @@ /** * Instruction that has two {@link AllocatableValue} operands. 
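For illustration only (not part of this changeset): the AMD64FrameMap change above folds the former useBasePointer flag into useStandardFrameProlog and sizes the initial spill area accordingly, reserving the return address plus one extra word for the saved RBP when the standard prologue is used. A tiny sketch of that arithmetic, assuming the usual 8-byte return address and word size on AMD64:

public class FramePrologSizeSketch {

    // Assumed AMD64 sizes: the return address and a machine word are both 8 bytes.
    static final int RETURN_ADDRESS_SIZE = 8;
    static final int WORD_SIZE = 8;

    /**
     * Initial spill size of an AMD64 frame: the return address pushed by the
     * caller's CALL, plus one word for the saved RBP when the standard
     * prologue (push rbp; mov rbp, rsp) is emitted.
     */
    static int initialSpillSize(boolean useStandardFrameProlog) {
        return RETURN_ADDRESS_SIZE + (useStandardFrameProlog ? WORD_SIZE : 0);
    }

    public static void main(String[] args) {
        System.out.println("without standard prologue: " + initialSpillSize(false) + " bytes"); // 8
        System.out.println("with standard prologue:    " + initialSpillSize(true) + " bytes");  // 16
    }
}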
*/ - public static class ThreeOp extends AMD64LIRInstruction { + public static class ThreeOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(ThreeOp.class); @Opcode private final VexRVMOp opcode; - private final AVXSize size; @Def({REG, HINT}) protected AllocatableValue result; @Use({REG}) protected AllocatableValue x; @@ -65,10 +65,8 @@ @Alive({REG, STACK}) protected AllocatableValue z; public ThreeOp(VexRVMOp opcode, AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y, AllocatableValue z) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; - this.result = result; this.x = x; this.y = y;
diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64VZeroUpper.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64VZeroUpper.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64VZeroUpper.java Mon Nov 18 12:40:06 2019 -0500 @@ -32,6 +32,8 @@ import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler; import org.graalvm.compiler.lir.LIRInstructionClass; +import org.graalvm.compiler.lir.amd64.AMD64Call.ForeignCallOp; +import org.graalvm.compiler.lir.amd64.vector.AMD64VectorInstruction; import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.amd64.AMD64; @@ -40,6 +42,48 @@ import jdk.vm.ci.code.RegisterValue; import jdk.vm.ci.meta.Value;
+/** + * vzeroupper is essential to avoid a performance penalty during SSE-AVX transitions. Specifically, + * once we have executed instructions that modify the upper bits (i.e., 128+) of the YMM registers, + * we need to perform vzeroupper to transition the state back to 128 bits before executing any SSE + * instructions. We don't need to place vzeroupper between VEX-encoded SSE instructions and legacy + * SSE instructions, nor between AVX instructions and VEX-encoded SSE instructions. + * + * When running Graal on HotSpot, we emit a vzeroupper LIR operation (i.e. an instance of this + * class) before a foreign call to a runtime function of which Graal has no knowledge. The underlying + * reason is that HotSpot is SSE-compiled so as to support older CPUs. We also emit a vzeroupper + * instruction (see {@code AMD64HotSpotReturnOp.emitCode}) upon returning, if the current LIR graph + * contains LIR operations that touch the upper bits of the YMM registers, including but not limited + * to {@link AMD64VectorInstruction}, {@link AMD64ArrayCompareToOp}, {@link AMD64ArrayEqualsOp}, + * {@link AMD64ArrayIndexOfOp}, and {@link ForeignCallOp} that invokes Graal-compiled stubs. For + * the last case, since Graal-compiled stubs are under our control, we don't emit vzeroupper upon + * return from the stub, but rather upon return from the current method. + * + * On JDK 8, C2 does not emit many vzeroupper instructions, potentially because YMM registers + * are not heavily employed (C2 vectorization starts using YMM registers in JDK 9, see + * https://cr.openjdk.java.net/~vlivanov/talks/2017_Vectorization_in_HotSpot_JVM.pdf) and thus less + * care has been taken to place these instructions. One example is that many intrinsics employ YMM + * registers starting from https://bugs.openjdk.java.net/browse/JDK-8005419, but do not properly + * place vzeroupper upon return from the intrinsic stub or from the caller of the stub. + * + * Most vzeroupper instructions were added in JDK 10 (https://bugs.openjdk.java.net/browse/JDK-8178811), and their use was + * later restricted on Haswell Xeon due to a performance regression + * (https://bugs.openjdk.java.net/browse/JDK-8190934). The actual condition for placing vzeroupper + * is at http://hg.openjdk.java.net/jdk/jdk/file/c7d9df2e470c/src/hotspot/cpu/x86/x86_64.ad#l428. To + * summarize, if an nmethod employs YMM registers (or intrinsics which use them, search for + * clear_upper_avx() in opto/library_call.cpp), vzeroupper will be generated on the nmethod's exit and + * before any calls in the nmethod, because even compiled nmethods can still use only SSE instructions. + * + * This means that if a Java method performs a call to an intrinsic that employs YMM registers, + * C2-compiled code will place a vzeroupper before the call, upon exit of the stub, and upon exit of + * this method. Graal only places the last one, because it ensures that Graal-compiled Java methods + * and stubs are consistent in using VEX encoding. + * + * In SubstrateVM, since the whole image is compiled consistently with or without VEX encoding (the + * latter is the default behavior, see {@code NativeImageGenerator.createTarget}), there is no need + * for vzeroupper. For dynamic compilation on a SubstrateVM image, if the image is SSE-compiled, we + * then need vzeroupper when returning from the dynamically compiled code to the pre-built image code. + */ public class AMD64VZeroUpper extends AMD64LIRInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AMD64VZeroUpper.class);
diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorBinary.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorBinary.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorBinary.java Mon Nov 18 12:40:06 2019 -0500 @@ -40,7 +40,6 @@ import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; import org.graalvm.compiler.lir.LIRInstructionClass; import org.graalvm.compiler.lir.Opcode; import org.graalvm.compiler.lir.amd64.AMD64AddressValue; -import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction; import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.amd64.AMD64Kind; @@ -48,20 +47,18 @@ public class AMD64VectorBinary { - public static final class AVXBinaryOp extends AMD64LIRInstruction { + public static final class AVXBinaryOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXBinaryOp.class); @Opcode private final VexRVMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({REG}) protected AllocatableValue x; @Use({REG, STACK}) protected AllocatableValue y; public AVXBinaryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.result = result; this.x = x; this.y = y; @@ -77,22 +74,20 @@ } } - public static final class AVXBinaryConstOp extends AMD64LIRInstruction { + public static final class
AVXBinaryConstOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXBinaryConstOp.class); @Opcode private final VexRRIOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({REG}) protected AllocatableValue x; protected int y; public AVXBinaryConstOp(VexRRIOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, int y) { - super(TYPE); + super(TYPE, size); assert (y & 0xFF) == y; this.opcode = opcode; - this.size = size; this.result = result; this.x = x; this.y = y; @@ -104,22 +99,20 @@ } } - public static final class AVXBinaryConstFloatOp extends AMD64LIRInstruction { + public static final class AVXBinaryConstFloatOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXBinaryConstFloatOp.class); @Opcode private final VexRVMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({REG}) protected AllocatableValue x; protected ConstantValue y; public AVXBinaryConstFloatOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, ConstantValue y) { - super(TYPE); + super(TYPE, size); assert y.getPlatformKind() == AMD64Kind.SINGLE || y.getPlatformKind() == AMD64Kind.DOUBLE; this.opcode = opcode; - this.size = size; this.result = result; this.x = x; this.y = y; @@ -136,11 +129,10 @@ } } - public static final class AVXBinaryMemoryOp extends AMD64LIRInstruction { + public static final class AVXBinaryMemoryOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXBinaryMemoryOp.class); @Opcode private final VexRVMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({REG}) protected AllocatableValue x; @@ -148,9 +140,8 @@ @State protected LIRFrameState state; public AVXBinaryMemoryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, AMD64AddressValue y, LIRFrameState state) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.result = result; this.x = x; this.y = y; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorCompareOp.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorCompareOp.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorCompareOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -35,16 +35,14 @@ import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; import org.graalvm.compiler.lir.LIRInstructionClass; import org.graalvm.compiler.lir.Opcode; -import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction; import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.meta.AllocatableValue; -public final class AMD64VectorCompareOp extends AMD64LIRInstruction { +public final class AMD64VectorCompareOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AMD64VectorCompareOp.class); @Opcode private final VexRMOp opcode; - private final AVXSize size; @Use({REG}) protected AllocatableValue x; @Use({REG, STACK}) protected AllocatableValue y; @@ -53,9 +51,8 @@ } public AMD64VectorCompareOp(VexRMOp opcode, 
AVXSize size, AllocatableValue x, AllocatableValue y) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.x = x; this.y = y; } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorFloatCompareOp.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorFloatCompareOp.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +package org.graalvm.compiler.lir.amd64.vector; + +import static jdk.vm.ci.code.ValueUtil.asRegister; +import static jdk.vm.ci.code.ValueUtil.isRegister; +import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG; +import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK; + +import org.graalvm.compiler.asm.amd64.AMD64Address; +import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler; +import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexFloatCompareOp; +import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; +import org.graalvm.compiler.lir.LIRInstructionClass; +import org.graalvm.compiler.lir.Opcode; +import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction; +import org.graalvm.compiler.lir.asm.CompilationResultBuilder; + +import jdk.vm.ci.meta.AllocatableValue; + +public class AMD64VectorFloatCompareOp extends AMD64LIRInstruction { + public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AMD64VectorFloatCompareOp.class); + + @Opcode private final VexFloatCompareOp opcode; + private final AVXSize size; + @Def({REG}) protected AllocatableValue result; + @Use({REG}) protected AllocatableValue x; + @Use({REG, STACK}) protected AllocatableValue y; + private final VexFloatCompareOp.Predicate predicate; + + public AMD64VectorFloatCompareOp(VexFloatCompareOp opcode, AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y, VexFloatCompareOp.Predicate predicate) { + super(TYPE); + this.opcode = opcode; + this.size = size; + this.result = result; + this.x = x; + this.y = y; + this.predicate = predicate; + } + + @Override + public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { + if (isRegister(y)) { + opcode.emit(masm, size, asRegister(result), asRegister(x), asRegister(y), predicate); + } else { + opcode.emit(masm, size, asRegister(result), asRegister(x), (AMD64Address) 
crb.asAddress(y), predicate); + } + } + +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorInstruction.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorInstruction.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +package org.graalvm.compiler.lir.amd64.vector; + +import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; +import org.graalvm.compiler.lir.LIRInstructionClass; +import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction; + +public abstract class AMD64VectorInstruction extends AMD64LIRInstruction { + + public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AMD64VectorInstruction.class); + protected final AVXSize size; + + public AMD64VectorInstruction(LIRInstructionClass c, AVXSize size) { + super(c); + this.size = size; + } + + @Override + public boolean needsClearUpperVectorRegisters() { + return size == AVXSize.YMM || size == AVXSize.ZMM; + } + +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorMove.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorMove.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorMove.java Mon Nov 18 12:40:06 2019 -0500 @@ -203,17 +203,15 @@ } } - public abstract static class VectorMemOp extends AMD64LIRInstruction { + public abstract static class VectorMemOp extends AMD64VectorInstruction { - protected final AVXSize size; protected final VexMoveOp op; @Use({COMPOSITE}) protected AMD64AddressValue address; @State protected LIRFrameState state; protected VectorMemOp(LIRInstructionClass c, AVXSize size, VexMoveOp op, AMD64AddressValue address, LIRFrameState state) { - super(c); - this.size = size; + super(c, size); this.op = op; this.address = address; this.state = state; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorUnary.java --- 
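For illustration only (not part of this changeset): the new AMD64VectorInstruction base class above lets every AVX LIR operation report whether it touches the upper bits of a YMM/ZMM register, and the whole-method scan added to CompilationResultBuilder further below aggregates that answer so a vzeroupper is only emitted when needed. A much simplified, self-contained model of those two pieces follows; the Op class and the instruction names in main are made up.

import java.util.List;

public class VZeroUpperNeedSketch {

    /** Mirrors AVXKind.AVXSize: the vector width an instruction operates on. */
    enum AVXSize { XMM, YMM, ZMM }

    /** Simplified stand-in for an AMD64 vector LIR instruction. */
    static final class Op {
        final String name;
        final AVXSize size;

        Op(String name, AVXSize size) {
            this.name = name;
            this.size = size;
        }

        /** Same rule as AMD64VectorInstruction.needsClearUpperVectorRegisters(). */
        boolean needsClearUpperVectorRegisters() {
            return size == AVXSize.YMM || size == AVXSize.ZMM;
        }
    }

    /**
     * Same idea as the scan in CompilationResultBuilder: a vzeroupper is only
     * worth emitting on return (or before a call into SSE-compiled code) if
     * some instruction in the method dirtied the upper YMM/ZMM bits.
     */
    static boolean methodNeedsVzeroupper(List<Op> ops) {
        return ops.stream().anyMatch(Op::needsClearUpperVectorRegisters);
    }

    public static void main(String[] args) {
        List<Op> xmmOnly = List.of(new Op("vmovss", AVXSize.XMM));
        List<Op> usesYmm = List.of(new Op("vpxor", AVXSize.YMM), new Op("vmovss", AVXSize.XMM));
        System.out.println(methodNeedsVzeroupper(xmmOnly)); // false
        System.out.println(methodNeedsVzeroupper(usesYmm)); // true
    }
}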
a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorUnary.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorUnary.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,19 +50,17 @@ public class AMD64VectorUnary { - public static final class AVXUnaryOp extends AMD64LIRInstruction { + public static final class AVXUnaryOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXUnaryOp.class); @Opcode private final VexRMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({REG, STACK}) protected AllocatableValue input; public AVXUnaryOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue input) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.result = result; this.input = input; } @@ -77,20 +75,18 @@ } } - public static final class AVXUnaryMemoryOp extends AMD64LIRInstruction { + public static final class AVXUnaryMemoryOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXUnaryMemoryOp.class); @Opcode private final VexRMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({COMPOSITE}) protected AMD64AddressValue input; @State protected LIRFrameState state; public AVXUnaryMemoryOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.result = result; this.input = input; this.state = state; @@ -105,19 +101,17 @@ } } - public static final class AVXBroadcastOp extends AMD64LIRInstruction { + public static final class AVXBroadcastOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXBroadcastOp.class); @Opcode private final VexRMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({REG, STACK, CONST}) protected Value input; public AVXBroadcastOp(VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, Value input) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.result = result; this.input = input; } @@ -136,20 +130,18 @@ } } - public static final class AVXConvertMemoryOp extends AMD64LIRInstruction { + public static final class AVXConvertMemoryOp extends AMD64VectorInstruction { public static final LIRInstructionClass TYPE = LIRInstructionClass.create(AVXConvertMemoryOp.class); @Opcode private final VexRVMOp opcode; - private final AVXKind.AVXSize size; @Def({REG}) protected AllocatableValue result; @Use({COMPOSITE}) protected AMD64AddressValue input; @State protected LIRFrameState state; public AVXConvertMemoryOp(VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) { - super(TYPE); + super(TYPE, size); this.opcode = opcode; - this.size = size; this.result = result; this.input = input; this.state 
= state; @@ -180,6 +172,8 @@ @Override public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) { + // Note that we assume only XMM-size instructions are emitted here. Loosening this + // restriction would require informing AMD64HotSpotReturnOp when emitting vzeroupper. if (isRegister(input)) { if (!asRegister(input).equals(asRegister(result))) { // clear result register to avoid unnecessary dependency diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/LIRInstruction.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/LIRInstruction.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/LIRInstruction.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -458,4 +458,8 @@ public int hashCode() { return id; } + + public boolean needsClearUpperVectorRegisters() { + return false; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/asm/CompilationResultBuilder.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/asm/CompilationResultBuilder.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/asm/CompilationResultBuilder.java Mon Nov 18 12:40:06 2019 -0500 @@ -692,4 +692,18 @@ public void setConservativeLabelRanges() { this.conservativeLabelOffsets = true; } + + public final boolean needsClearUpperVectorRegisters() { + for (AbstractBlockBase block : lir.codeEmittingOrder()) { + if (block == null) { + continue; + } + for (LIRInstruction op : lir.getLIRforBlock(block)) { + if (op.needsClearUpperVectorRegisters()) { + return true; + } + } + } + return false; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/CompareNode.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/CompareNode.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/CompareNode.java Mon Nov 18 12:40:06 2019 -0500 @@ -354,4 +354,24 @@ return comparison; } + + public static LogicNode createFloatCompareNode(StructuredGraph graph, CanonicalCondition condition, ValueNode x, ValueNode y, boolean unorderedIsTrue, NodeView view) { + LogicNode result = createFloatCompareNode(condition, x, y, unorderedIsTrue, view); + return (result.graph() == null ? 
graph.addOrUniqueWithInputs(result) : result); + } + + public static LogicNode createFloatCompareNode(CanonicalCondition condition, ValueNode x, ValueNode y, boolean unorderedIsTrue, NodeView view) { + assert x.getStackKind() == y.getStackKind(); + assert x.getStackKind().isNumericFloat(); + + LogicNode comparison; + if (condition == CanonicalCondition.EQ) { + comparison = FloatEqualsNode.create(x, y, view); + } else { + assert condition == CanonicalCondition.LT; + comparison = FloatLessThanNode.create(x, y, unorderedIsTrue, view); + } + + return comparison; + } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InvocationPlugins.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InvocationPlugins.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InvocationPlugins.java Mon Nov 18 12:40:06 2019 -0500 @@ -1401,7 +1401,7 @@ if (declaringType instanceof ResolvedJavaSymbol) { return checkResolvable(isOptional, ((ResolvedJavaSymbol) declaringType).getResolved(), binding); } - Class declaringClass = InvocationPlugins.resolveType(declaringType, isOptional); + Class declaringClass = resolveType(declaringType, isOptional); if (declaringClass == null) { return true; } @@ -1411,7 +1411,7 @@ } } else { if (resolveMethod(declaringClass, binding) == null && !isOptional) { - throw new AssertionError(String.format("Method not found: %s.%s%s", declaringClass.getName(), binding.name, binding.argumentsDescriptor)); + throw new NoSuchMethodError(String.format("%s.%s%s", declaringClass.getName(), binding.name, binding.argumentsDescriptor)); } } return true; diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.options/src/org/graalvm/compiler/options/OptionsParser.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.options/src/org/graalvm/compiler/options/OptionsParser.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.options/src/org/graalvm/compiler/options/OptionsParser.java Mon Nov 18 12:40:06 2019 -0500 @@ -132,12 +132,19 @@ throw new IllegalArgumentException(msg.toString()); } + Object value = parseOptionValue(desc, uncheckedValue); + + desc.getOptionKey().update(values, value); + } + + /** Parses a given option value with a known descriptor. 
*/ + public static Object parseOptionValue(OptionDescriptor desc, Object uncheckedValue) { Class optionType = desc.getOptionValueType(); Object value; if (!(uncheckedValue instanceof String)) { if (optionType != uncheckedValue.getClass()) { String type = optionType.getSimpleName(); - throw new IllegalArgumentException(type + " option '" + name + "' must have " + type + " value, not " + uncheckedValue.getClass() + " [toString: " + uncheckedValue + "]"); + throw new IllegalArgumentException(type + " option '" + desc.getName() + "' must have " + type + " value, not " + uncheckedValue.getClass() + " [toString: " + uncheckedValue + "]"); } value = uncheckedValue; } else { @@ -148,7 +155,7 @@ } else if ("false".equals(valueString)) { value = Boolean.FALSE; } else { - throw new IllegalArgumentException("Boolean option '" + name + "' must have value \"true\" or \"false\", not \"" + uncheckedValue + "\""); + throw new IllegalArgumentException("Boolean option '" + desc.getName() + "' must have value \"true\" or \"false\", not \"" + uncheckedValue + "\""); } } else if (optionType == String.class) { value = valueString; @@ -156,7 +163,7 @@ value = ((EnumOptionKey) desc.getOptionKey()).valueOf(valueString); } else { if (valueString.isEmpty()) { - throw new IllegalArgumentException("Non empty value required for option '" + name + "'"); + throw new IllegalArgumentException("Non empty value required for option '" + desc.getName() + "'"); } try { if (optionType == Float.class) { @@ -168,15 +175,14 @@ } else if (optionType == Long.class) { value = Long.valueOf(parseLong(valueString)); } else { - throw new IllegalArgumentException("Wrong value for option '" + name + "'"); + throw new IllegalArgumentException("Wrong value for option '" + desc.getName() + "'"); } } catch (NumberFormatException nfe) { - throw new IllegalArgumentException("Value for option '" + name + "' has invalid number format: " + valueString); + throw new IllegalArgumentException("Value for option '" + desc.getName() + "' has invalid number format: " + valueString); } } } - - desc.getOptionKey().update(values, value); + return value; } private static long parseLong(String v) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Mon Nov 18 12:40:06 2019 -0500 @@ -1107,11 +1107,20 @@ @Override public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset) { + // Opaque mode does not directly impose any ordering constraints with respect to other + // variables beyond Plain mode. + if (accessKind == AccessKind.OPAQUE && StampTool.isPointerAlwaysNull(object)) { + // OFF_HEAP_LOCATION accesses are not floatable => no membars needed for opaque. + return apply(b, targetMethod, unsafe, offset); + } // Emits a null-check for the otherwise unused receiver unsafe.get(); if (accessKind.emitBarriers) { b.add(new MembarNode(accessKind.preReadBarriers)); } + // Raw accesses can be turned into floatable field accesses, the membars preserve the + // access mode. 
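As an illustration outside this changeset: the OptionsParser change above extracts parseOptionValue so a value can be converted to its declared option type without immediately updating an options map. A simplified sketch of such a conversion follows; it omits the enum case and the size-suffix handling done by the real parseLong helper, and the option names used in main are only examples.

public class OptionValueParserSketch {

    /**
     * Converts a raw option value (typically a String from the command line)
     * into the declared option type, following the structure of the
     * parseOptionValue method introduced above.
     */
    static Object parseOptionValue(String optionName, Class<?> optionType, Object uncheckedValue) {
        if (!(uncheckedValue instanceof String)) {
            if (optionType != uncheckedValue.getClass()) {
                throw new IllegalArgumentException(optionType.getSimpleName() + " option '" + optionName
                        + "' must have " + optionType.getSimpleName() + " value, not " + uncheckedValue.getClass());
            }
            return uncheckedValue;
        }
        String s = (String) uncheckedValue;
        if (optionType == Boolean.class) {
            if ("true".equals(s)) {
                return Boolean.TRUE;
            }
            if ("false".equals(s)) {
                return Boolean.FALSE;
            }
            throw new IllegalArgumentException("Boolean option '" + optionName + "' must have value \"true\" or \"false\", not \"" + s + "\"");
        }
        if (optionType == String.class) {
            return s;
        }
        if (s.isEmpty()) {
            throw new IllegalArgumentException("Non empty value required for option '" + optionName + "'");
        }
        try {
            if (optionType == Integer.class) {
                return Integer.valueOf(s);
            } else if (optionType == Long.class) {
                return Long.valueOf(s);
            } else if (optionType == Float.class) {
                return Float.valueOf(s);
            } else if (optionType == Double.class) {
                return Double.valueOf(s);
            }
        } catch (NumberFormatException nfe) {
            throw new IllegalArgumentException("Value for option '" + optionName + "' has invalid number format: " + s);
        }
        throw new IllegalArgumentException("Wrong value for option '" + optionName + "'");
    }

    public static void main(String[] args) {
        System.out.println(parseOptionValue("TraceInlining", Boolean.class, "true"));           // true
        System.out.println(parseOptionValue("ExampleLimit", Integer.class, "16"));              // 16
        System.out.println(parseOptionValue("MethodFilter", String.class, "String.hashCode"));  // String.hashCode
    }
}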
In the case of opaque access, and only for opaque, the location of the + // wrapping membars can be refined to the field location. createUnsafeAccess(object, b, (obj, loc) -> new RawLoadNode(obj, offset, unsafeAccessKind, loc)); if (accessKind.emitBarriers) { b.add(new MembarNode(accessKind.postReadBarriers)); @@ -1144,12 +1153,21 @@ @Override public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode value) { + // Opaque mode does not directly impose any ordering constraints with respect to other + // variables beyond Plain mode. + if (accessKind == AccessKind.OPAQUE && StampTool.isPointerAlwaysNull(object)) { + // OFF_HEAP_LOCATION accesses are not floatable => no membars needed for opaque. + return apply(b, targetMethod, unsafe, offset, value); + } // Emits a null-check for the otherwise unused receiver unsafe.get(); if (accessKind.emitBarriers) { b.add(new MembarNode(accessKind.preWriteBarriers)); } ValueNode maskedValue = b.maskSubWordValue(value, unsafeAccessKind); + // Raw accesses can be turned into floatable field accesses, the membars preserve the + // access mode. In the case of opaque access, and only for opaque, the location of the + // wrapping membars can be refined to the field location. createUnsafeAccess(object, b, (obj, loc) -> new RawStoreNode(obj, offset, maskedValue, unsafeAccessKind, loc)); if (accessKind.emitBarriers) { b.add(new MembarNode(accessKind.postWriteBarriers)); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MethodHandleNode.java --- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MethodHandleNode.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MethodHandleNode.java Mon Nov 18 12:40:06 2019 -0500 @@ -259,7 +259,7 @@ Assumptions assumptions = adder.getAssumptions(); ResolvedJavaMethod realTarget = null; - if (target.canBeStaticallyBound()) { + if (target.canBeStaticallyBound() || intrinsicMethod == IntrinsicMethod.LINK_TO_SPECIAL) { realTarget = target; } else { ResolvedJavaType targetType = target.getDeclaringClass(); diff -r 4d58a35f3cfa -r 4ad81e9e30fd src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java --- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java Wed Nov 13 17:21:31 2019 -0500 +++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java Mon Nov 18 12:40:06 2019 -0500 @@ -2089,7 +2089,7 @@ // Releases the specified inflater to the list of available inflaters. 
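For illustration only (not part of this changeset): the unsafe get/put plugin changes above special-case opaque accesses whose base object is provably null, since such accesses are off-heap, off-heap raw accesses cannot float, and therefore no wrapping membars are required. The following sketch models just that branching decision, with strings standing in for the emitted graph nodes.

public class OpaqueUnsafeAccessSketch {

    enum AccessKind { PLAIN, OPAQUE, ACQUIRE, VOLATILE }

    /**
     * Models only the branch added above: an opaque access whose base object
     * is provably null is an off-heap access, which never becomes a floatable
     * field access and therefore needs no wrapping membars.
     */
    static String buildUnsafeRead(AccessKind kind, boolean baseIsAlwaysNull, boolean emitBarriers) {
        if (kind == AccessKind.OPAQUE && baseIsAlwaysNull) {
            return "RawLoad @ OFF_HEAP_LOCATION";                 // no membars needed
        }
        String load = "RawLoad";
        return emitBarriers ? "Membar(pre) | " + load + " | Membar(post)" : load;
    }

    public static void main(String[] args) {
        System.out.println(buildUnsafeRead(AccessKind.OPAQUE, true, true));    // RawLoad @ OFF_HEAP_LOCATION
        System.out.println(buildUnsafeRead(AccessKind.VOLATILE, false, true)); // Membar(pre) | RawLoad | Membar(post)
        System.out.println(buildUnsafeRead(AccessKind.PLAIN, false, false));   // RawLoad
    }
}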
private void releaseDeflater(Deflater def) { synchronized (deflaters) { - if (inflaters.size() < MAX_FLATER) { + if (deflaters.size() < MAX_FLATER) { def.reset(); deflaters.add(def); } else { diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/ProblemList.txt --- a/test/hotspot/jtreg/ProblemList.txt Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/ProblemList.txt Mon Nov 18 12:40:06 2019 -0500 @@ -54,7 +54,6 @@ compiler/types/correctness/OffTest.java 8225620 solaris-sparcv9 compiler/c2/Test6852078.java 8194310 generic-all -compiler/c2/Test8004741.java 8214904 generic-all compiler/cpuflags/TestAESIntrinsicsOnSupportedConfig.java 8190680 generic-all @@ -206,16 +205,4 @@ vmTestbase/nsk/jdwp/ThreadReference/ForceEarlyReturn/forceEarlyReturn001/forceEarlyReturn001.java 7199837 generic-all -vmTestbase/nsk/jvmti/scenarios/allocation/AP01/ap01t001/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/allocation/AP04/ap04t001/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/allocation/AP04/ap04t002/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/allocation/AP04/ap04t003/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/allocation/AP10/ap10t001/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/allocation/AP12/ap12t001/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/capability/CM02/cm02t001/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/events/EM02/em02t002/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/events/EM02/em02t003/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/events/EM02/em02t005/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/events/EM02/em02t006/TestDescription.java 8233549 generic-all -vmTestbase/nsk/jvmti/scenarios/events/EM07/em07t002/TestDescription.java 8233549 generic-all ############################################################################# diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/TEST.groups --- a/test/hotspot/jtreg/TEST.groups Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/TEST.groups Mon Nov 18 12:40:06 2019 -0500 @@ -322,6 +322,7 @@ -runtime/cds/appcds/javaldr/GCSharedStringsDuringDump.java \ -runtime/cds/appcds/javaldr/HumongousDuringDump.java \ -runtime/cds/appcds/sharedStrings \ + -runtime/cds/appcds/ArchiveRelocationTest.java \ -runtime/cds/appcds/DumpClassList.java \ -runtime/cds/appcds/ExtraSymbols.java \ -runtime/cds/appcds/LongClassListPath.java \ @@ -332,6 +333,15 @@ -runtime/cds/appcds/UnusedCPDuringDump.java \ -runtime/cds/appcds/VerifierTest_1B.java +hotspot_cds_relocation = \ + gc/g1/TestSharedArchiveWithPreTouch.java \ + runtime/cds \ + runtime/modules/ModulesSymLink.java \ + runtime/modules/PatchModule/PatchModuleCDS.java \ + runtime/modules/PatchModule/PatchModuleClassList.java \ + runtime/NMT \ + serviceability/sa + # A subset of AppCDS tests to be run in tier1 tier1_runtime_appcds = \ runtime/cds/appcds/HelloTest.java \ diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/compiler/c2/Test8004741.java --- a/test/hotspot/jtreg/compiler/c2/Test8004741.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/compiler/c2/Test8004741.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. 
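For illustration only (not part of this changeset): the ZipFileSystem fix above makes releaseDeflater consult the deflater pool rather than the inflater pool before caching a returned Deflater. A small self-contained sketch of the corrected pooling pattern; the MAX_FLATER value of 20 is an assumption, as the real constant is defined elsewhere in ZipFileSystem.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

public class FlaterPoolSketch {

    // Assumed pool cap; the real MAX_FLATER constant lives in ZipFileSystem.
    private static final int MAX_FLATER = 20;

    private final Deque<Inflater> inflaters = new ArrayDeque<>();
    private final Deque<Deflater> deflaters = new ArrayDeque<>();

    /** Returns a deflater to its own pool, or frees it when that pool is full. */
    void releaseDeflater(Deflater def) {
        synchronized (deflaters) {
            // The cap must be checked against the deflater pool itself; checking
            // the inflater pool (the bug fixed above) bounds it by the wrong count.
            if (deflaters.size() < MAX_FLATER) {
                def.reset();
                deflaters.add(def);
            } else {
                def.end();
            }
        }
    }

    /** The inflater pool follows the same pattern with its own list and lock. */
    void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    public static void main(String[] args) {
        FlaterPoolSketch pool = new FlaterPoolSketch();
        pool.releaseDeflater(new Deflater());
        pool.releaseInflater(new Inflater());
        System.out.println("pooled deflaters: " + pool.deflaters.size()); // 1
    }
}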
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,22 +22,33 @@ */ /* - * @test Test8004741.java + * @test * @bug 8004741 * @summary Missing compiled exception handle table entry for multidimensional array allocation * * @requires !vm.graal.enabled + * @library /test/lib + * + * @build sun.hotspot.WhiteBox + * @run driver ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * * @run main/othervm -Xmx128m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions * -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers * -XX:+SafepointALot -XX:GuaranteedSafepointInterval=100 + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * compiler.c2.Test8004741 + * * @run main/othervm -Xmx128m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions * -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers + * -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI * compiler.c2.Test8004741 */ package compiler.c2; +import sun.hotspot.WhiteBox; + public class Test8004741 extends Thread { static int passed = 0; @@ -58,7 +69,7 @@ } catch (ThreadDeath e) { System.out.println("test got ThreadDeath"); passed++; - throw(e); + throw e; } return ar; } @@ -84,15 +95,11 @@ try { progressLock.wait(); } catch (InterruptedException e) { - e.printStackTrace(); - System.out.println("unexpected InterruptedException"); - fail(); + throw new Error("unexpected InterruptedException", e); } } if (progressState > state) { - System.out.println("unexpected test state change, expected " + - state + " but saw " + progressState); - fail(); + throw new Error("unexpected test state change, state = " + state + ", progressState = " + progressState); } } } @@ -114,9 +121,7 @@ } catch (ThreadDeath e) { // nothing to say, passing was incremented by the test. } catch (Throwable e) { - e.printStackTrace(); - System.out.println("unexpected Throwable " + e); - fail(); + throw new Error("unexpected Throwable " + e, e); } toState(STOPPING); } @@ -144,24 +149,22 @@ test(2, 100); } - // Will this sleep help ensure that the compiler is run? - Thread.sleep(500); - passed = 0; + var method = Test8004741.class.getDeclaredMethod("test", int.class, int.class); + if (!WhiteBox.getWhiteBox().isMethodCompiled(method)) { + throw new Error("test method didn't get compiled"); + } try { test(-1, 100); - System.out.println("Missing NegativeArraySizeException #1"); - fail(); - } catch ( java.lang.NegativeArraySizeException e ) { + throw new AssertionError("Missing NegativeArraySizeException"); + } catch (NegativeArraySizeException e) { System.out.println("Saw expected NegativeArraySizeException #1"); } try { test(100, -1); - fail(); - System.out.println("Missing NegativeArraySizeException #2"); - fail(); - } catch ( java.lang.NegativeArraySizeException e ) { + throw new AssertionError("Missing NegativeArraySizeException"); + } catch (NegativeArraySizeException e) { System.out.println("Saw expected NegativeArraySizeException #2"); } @@ -169,23 +172,10 @@ * as long as it does not crash (the outcome if the exception range * table entry for the array allocation is missing). 
*/ - int N = 12; - for (int n = 0; n < N; n++) { + passed = 0; + int limit = 6; + while (passed != limit) { threadTest(); } - - if (passed > N/2) { - System.out.println("Saw " + passed + " out of " + N + " possible ThreadDeath hits"); - System.out.println("PASSED"); - } else { - System.out.println("Too few ThreadDeath hits; expected at least " + N/2 + - " but saw only " + passed); - fail(); - } } - - static void fail() { - System.out.println("FAILED"); - System.exit(97); - } -}; +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/VirtualObjectTestBase.java --- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/VirtualObjectTestBase.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/VirtualObjectTestBase.java Mon Nov 18 12:40:06 2019 -0500 @@ -97,9 +97,9 @@ JavaValue[] values = getJavaValues(kinds); test(simple, values, kinds, false); - // Spread a long value across two int fields + // Spread a long value across two int fields starting at an aligned field kinds = Arrays.copyOf(fieldKinds, fieldKinds.length - 1); - kinds[1] = JavaKind.Long; + kinds[fields[0].getOffset() % 8 == 0 ? 0 : 1] = JavaKind.Long; test(simple, getJavaValues(kinds), kinds, false); // Produce a long value for the final int field so there is no matching int field for the diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java --- a/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/gc/shenandoah/jvmti/TestHeapDump.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. + * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as @@ -40,6 +40,8 @@ * @run main/othervm/native/timeout=300 -agentlib:TestHeapDump -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m -XX:ShenandoahGCHeuristics=aggressive -XX:-UseCompressedOops TestHeapDump */ +import java.lang.ref.Reference; + public class TestHeapDump { private static final int NUM_ITER = 10000; @@ -86,6 +88,8 @@ throw new RuntimeException("Expected " + EXPECTED_OBJECTS + " objects, but got " + numObjs); } } + Reference.reachabilityFence(array); + Reference.reachabilityFence(localRoot); } // We look for the instances of this class during the heap scan diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/runtime/cds/SpaceUtilizationCheck.java --- a/test/hotspot/jtreg/runtime/cds/SpaceUtilizationCheck.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/runtime/cds/SpaceUtilizationCheck.java Mon Nov 18 12:40:06 2019 -0500 @@ -73,8 +73,8 @@ Matcher matcher = pattern.matcher(line); if (matcher.find()) { String name = matcher.group(1); - if (name.equals("s0") || name.equals("s1")) { - // String regions are listed at the end and they may not be fully occupied. + if (name.equals("bm")) { + // Bitmap space does not have a requested address. 
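For illustration only (not part of this changeset): the TestHeapDump change above adds Reference.reachabilityFence calls so that array and localRoot remain strongly reachable until the JVMTI heap walk has finished counting them. A minimal standalone example of the same idiom, using a made-up Payload class:

import java.lang.ref.Reference;

public class ReachabilityFenceSketch {

    static final class Payload {
        final byte[] data = new byte[1024];
    }

    public static void main(String[] args) throws Exception {
        Payload payload = new Payload();

        // Imagine an external observer (JVMTI heap walk, heap dump) running here
        // and expecting to still find 'payload' among the live objects.
        Thread.sleep(10);

        // Without the fence, the JIT may treat 'payload' as dead right after its
        // last use, allowing it to be collected while the observer is still
        // counting; the fence keeps it strongly reachable up to this point.
        Reference.reachabilityFence(payload);
    }
}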
break; } else { System.out.println("Checking " + name + " in : " + line); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/runtime/cds/appcds/ArchiveRelocationTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/hotspot/jtreg/runtime/cds/appcds/ArchiveRelocationTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @comment the test uses -XX:ArchiveRelocationMode=1 to force relocation. + * @requires vm.cds + * @summary Testing relocation of CDS archive (during both dump time and run time) + * @comment JDK-8231610 Relocate the CDS archive if it cannot be mapped to the requested address + * @bug 8231610 + * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds/test-classes + * @build Hello + * @run driver ClassFileInstaller -jar hello.jar Hello + * @run driver ArchiveRelocationTest + */ + +import jdk.test.lib.process.OutputAnalyzer; +import jtreg.SkippedException; + +public class ArchiveRelocationTest { + public static void main(String... args) throws Exception { + try { + test(true, false); + test(false, true); + test(true, true); + } catch (SkippedException s) { + s.printStackTrace(); + throw new RuntimeException("Archive mapping should always succeed after JDK-8231610 (did the machine run out of memory?)"); + } + } + + static int caseCount = 0; + + // dump_reloc - force relocation of archive during dump time? + // run_reloc - force relocation of archive during run time? + static void test(boolean dump_reloc, boolean run_reloc) throws Exception { + caseCount += 1; + System.out.println("============================================================"); + System.out.println("case = " + caseCount + ", dump = " + dump_reloc + + ", run = " + run_reloc); + System.out.println("============================================================"); + + + String appJar = ClassFileInstaller.getJarPath("hello.jar"); + String mainClass = "Hello"; + String forceRelocation = "-XX:ArchiveRelocationMode=1"; + String dumpRelocArg = dump_reloc ? forceRelocation : "-showversion"; + String runRelocArg = run_reloc ? 
forceRelocation : "-showversion"; + String logArg = "-Xlog:cds=debug,cds+reloc=debug"; + String unlockArg = "-XX:+UnlockDiagnosticVMOptions"; + + OutputAnalyzer out = TestCommon.dump(appJar, + TestCommon.list(mainClass), + unlockArg, dumpRelocArg, logArg); + if (dump_reloc) { + out.shouldContain("ArchiveRelocationMode == 1: always allocate class space at an alternative address"); + out.shouldContain("Relocating archive from"); + } + + TestCommon.run("-cp", appJar, unlockArg, runRelocArg, logArg, mainClass) + .assertNormalExit(output -> { + if (run_reloc) { + output.shouldContain("runtime archive relocation start"); + output.shouldContain("runtime archive relocation done"); + } + }); + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/runtime/cds/appcds/JarBuilder.java --- a/test/hotspot/jtreg/runtime/cds/appcds/JarBuilder.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/runtime/cds/appcds/JarBuilder.java Mon Nov 18 12:40:06 2019 -0500 @@ -232,7 +232,7 @@ executeProcess(keyTool, "-genkey", "-keystore", "./keystore", "-alias", "mykey", - "-storepass", "abc123", "-keypass", "abc123", + "-storepass", "abc123", "-keypass", "abc123", "-keyalg", "dsa", "-dname", "CN=jvmtest") .shouldHaveExitValue(0); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveRelocationTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveRelocationTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @comment the test uses -XX:ArchiveRelocationMode=1 to force relocation. + * @requires vm.cds + * @summary Testing relocation of dynamic CDS archive (during both dump time and run time) + * @comment JDK-8231610 Relocate the CDS archive if it cannot be mapped to the requested address + * @bug 8231610 + * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds /test/hotspot/jtreg/runtime/cds/appcds/test-classes + * @build Hello + * @run driver ClassFileInstaller -jar hello.jar Hello + * @run driver DynamicArchiveRelocationTest + */ + +import jdk.test.lib.process.OutputAnalyzer; +import jtreg.SkippedException; + +public class DynamicArchiveRelocationTest extends DynamicArchiveTestBase { + public static void main(String... 
args) throws Exception { + try { + testOuter(false); + testOuter(true); + } catch (SkippedException s) { + s.printStackTrace(); + throw new RuntimeException("Archive mapping should always succeed after JDK-8231610 (did the machine run out of memory?)"); + } + } + + static void testOuter(boolean dump_base_reloc) throws Exception { + testInner(dump_base_reloc, true, false); + testInner(dump_base_reloc, false, true); + testInner(dump_base_reloc, true, true); + } + + static boolean dump_base_reloc, dump_top_reloc, run_reloc; + + // dump_base_reloc - force relocation of archive when dumping base archive + // dump_top_reloc - force relocation of archive when dumping top archive + // run_reloc - force relocation of archive when running + static void testInner(boolean dump_base_reloc, boolean dump_top_reloc, boolean run_reloc) throws Exception { + DynamicArchiveRelocationTest.dump_base_reloc = dump_base_reloc; + DynamicArchiveRelocationTest.dump_top_reloc = dump_top_reloc; + DynamicArchiveRelocationTest.run_reloc = run_reloc; + + runTest(DynamicArchiveRelocationTest::doTest); + } + + static int caseCount = 0; + static void doTest() throws Exception { + caseCount += 1; + System.out.println("============================================================"); + System.out.println("case = " + caseCount + ", base = " + dump_base_reloc + + ", top = " + dump_top_reloc + + ", run = " + run_reloc); + System.out.println("============================================================"); + + String appJar = ClassFileInstaller.getJarPath("hello.jar"); + String mainClass = "Hello"; + String forceRelocation = "-XX:ArchiveRelocationMode=1"; + String dumpBaseRelocArg = dump_base_reloc ? forceRelocation : "-showversion"; + String dumpTopRelocArg = dump_top_reloc ? forceRelocation : "-showversion"; + String runRelocArg = run_reloc ? forceRelocation : "-showversion"; + String logArg = "-Xlog:cds=debug,cds+reloc=debug"; + + String baseArchiveName = getNewArchiveName("base"); + String topArchiveName = getNewArchiveName("top"); + + String runtimeMsg1 = "runtime archive relocation start"; + String runtimeMsg2 = "runtime archive relocation done"; + String unlockArg = "-XX:+UnlockDiagnosticVMOptions"; + + // (1) Dump base archive (static) + + OutputAnalyzer out = dumpBaseArchive(baseArchiveName, unlockArg, dumpBaseRelocArg, logArg); + if (dump_base_reloc) { + out.shouldContain("ArchiveRelocationMode == 1: always allocate class space at an alternative address"); + out.shouldContain("Relocating archive from"); + } + + // (2) Dump top archive (dynamic) + + dump2(baseArchiveName, topArchiveName, + unlockArg, + dumpTopRelocArg, + logArg, + "-cp", appJar, mainClass) + .assertNormalExit(output -> { + if (dump_top_reloc) { + output.shouldContain(runtimeMsg1); + output.shouldContain(runtimeMsg2); + } + }); + + run2(baseArchiveName, topArchiveName, + unlockArg, + runRelocArg, + logArg, + "-cp", appJar, mainClass) + .assertNormalExit(output -> { + if (run_reloc) { + output.shouldContain(runtimeMsg1); + output.shouldContain(runtimeMsg2); + } + }); + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveTestBase.java --- a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveTestBase.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveTestBase.java Mon Nov 18 12:40:06 2019 -0500 @@ -134,7 +134,7 @@ * Dump the base archive. The JDK's default class list is used (unless otherwise specified * in cmdLineSuffix). 
*/ - public static void dumpBaseArchive(String baseArchiveName, String ... cmdLineSuffix) + public static OutputAnalyzer dumpBaseArchive(String baseArchiveName, String ... cmdLineSuffix) throws Exception { CDSOptions opts = new CDSOptions(); @@ -143,6 +143,7 @@ opts.addSuffix("-Djava.class.path="); OutputAnalyzer out = CDSTestUtils.createArchive(opts); CDSTestUtils.checkDump(out); + return out; } /** diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/com/sun/jdi/JdwpListenTest.java --- a/test/jdk/com/sun/jdi/JdwpListenTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/com/sun/jdi/JdwpListenTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -54,6 +54,10 @@ private static final boolean IsWindows = System.getProperty("os.name").toLowerCase().contains("windows"); + // Set to true to allow testing of attach from wrong address (expected to fail). + // It's off by default as it causes test time increase and test interference (see JDK-8231915). + private static boolean allowNegativeTesting = false; + public static void main(String[] args) throws Exception { List addresses = getAddresses(); @@ -87,6 +91,11 @@ throws IOException { log("\nTest: listen at " + listenAddress + ", attaching from " + connectAddress + ", expected: " + (expectedResult ? "SUCCESS" : "FAILURE")); + if (!expectedResult && !allowNegativeTesting) { + log("SKIPPED: negative testing is disabled"); + return; + } + log("Starting listening debuggee at " + listenAddress); try (Debuggee debuggee = Debuggee.launcher("HelloWorld").setAddress(listenAddress + ":0").launch()) { log("Debuggee is listening on " + listenAddress + ":" + debuggee.getAddress()); @@ -103,6 +112,7 @@ } } } + log("PASSED"); } private static void addAddr(List list, InetAddress addr) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/lang/invoke/TryFinallyTest.java --- a/test/jdk/java/lang/invoke/TryFinallyTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/java/lang/invoke/TryFinallyTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -24,8 +24,8 @@ */ /* @test - * @bug 8139885 8150824 8150825 8194238 - * @run testng/othervm -ea -esa test.java.lang.invoke.TryFinallyTest + * @bug 8139885 8150824 8150825 8194238 8233920 + * @run testng/othervm -ea -esa -Xverify:all test.java.lang.invoke.TryFinallyTest */ package test.java.lang.invoke; @@ -55,6 +55,41 @@ assertEquals("Hello, world!", hello.invoke("world")); } + @DataProvider + static Object[][] tryFinallyArgs() { + return new Object[][] { + { boolean.class, true }, + { byte.class, (byte) 2 }, + { short.class, (short) 2 }, + { char.class, (char) 2 }, + { int.class, 2 }, + { long.class, 2L }, + { float.class, 2f }, + { double.class, 2D }, + { Object.class, new Object() } + }; + } + + @Test(dataProvider = "tryFinallyArgs") + public static void testTryFinally(Class argType, Object arg) throws Throwable { + MethodHandle identity = MethodHandles.identity(argType); + MethodHandle tryFinally = MethodHandles.tryFinally( + identity, + MethodHandles.dropArguments(identity, 0, Throwable.class)); + assertEquals(methodType(argType, argType), tryFinally.type()); + assertEquals(arg, tryFinally.invoke(arg)); + } + + @Test(dataProvider = "tryFinallyArgs", expectedExceptions = TryFinally.T1.class) + public static void testTryFinallyException(Class argType, Object arg) throws Throwable { + MethodHandle identity = TryFinally.MH_throwingTargetIdentity.asType(methodType(argType, argType)); + MethodHandle tryFinally = MethodHandles.tryFinally( + identity, + MethodHandles.dropArguments(identity, 0, TryFinally.T1.class)); + 
assertEquals(methodType(argType, argType), tryFinally.type()); + tryFinally.invoke(arg); // should throw + } + @Test public static void testTryFinallyVoid() throws Throwable { MethodHandle tfVoid = MethodHandles.tryFinally(TryFinally.MH_print, TryFinally.MH_printMore); @@ -175,6 +210,10 @@ throw new T1(); } + static Object throwingTargetIdentity(Object o) throws Throwable { + throw new T1(); + } + static void catchingCleanup(T2 t) throws Throwable { } @@ -189,6 +228,7 @@ static final MethodType MT_voidTarget = methodType(void.class); static final MethodType MT_voidCleanup = methodType(void.class, Throwable.class); static final MethodType MT_throwingTarget = methodType(void.class); + static final MethodType MT_throwingTargetIdentity = methodType(Object.class, Object.class); static final MethodType MT_catchingCleanup = methodType(void.class, T2.class); static final MethodHandle MH_greet; @@ -200,6 +240,7 @@ static final MethodHandle MH_voidTarget; static final MethodHandle MH_voidCleanup; static final MethodHandle MH_throwingTarget; + static final MethodHandle MH_throwingTargetIdentity; static final MethodHandle MH_catchingCleanup; static final MethodHandle MH_dummyTarget; @@ -219,6 +260,7 @@ MH_voidTarget = LOOKUP.findStatic(TRY_FINALLY, "voidTarget", MT_voidTarget); MH_voidCleanup = LOOKUP.findStatic(TRY_FINALLY, "voidCleanup", MT_voidCleanup); MH_throwingTarget = LOOKUP.findStatic(TRY_FINALLY, "throwingTarget", MT_throwingTarget); + MH_throwingTargetIdentity = LOOKUP.findStatic(TRY_FINALLY, "throwingTargetIdentity", MT_throwingTargetIdentity); MH_catchingCleanup = LOOKUP.findStatic(TRY_FINALLY, "catchingCleanup", MT_catchingCleanup); MH_dummyTarget = MethodHandles.dropArguments(MH_voidTarget, 0, int.class, long.class, Object.class, int.class, long.class, Object.class); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/net/DatagramSocket/AddressNotSet.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/net/DatagramSocket/AddressNotSet.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8233141 + * @summary DatagramSocket.send should throw IllegalArgumentException + * when the packet address is not correctly set. 
+ * @run main AddressNotSet + */ + +import java.net.DatagramPacket; +import java.net.DatagramSocket; +import java.net.InetAddress; +import java.net.MulticastSocket; +import java.net.SocketAddress; +import java.nio.channels.DatagramChannel; + +import static java.lang.System.out; + +public class AddressNotSet { + + final InetAddress loopbackAddress = InetAddress.getLoopbackAddress(); + final DatagramSocket serversock; + int i; + AddressNotSet() throws Exception { + serversock = new DatagramSocket(0, loopbackAddress); + } + + public static void main (String args[]) throws Exception { + new AddressNotSet().run(); + } + + public void run() throws Exception { + try (var ss = serversock) { + try (DatagramSocket sock = new DatagramSocket()) { + test(sock); + } + try (DatagramSocket sock = new MulticastSocket()) { + test(sock); + } + try (DatagramSocket sock = DatagramChannel.open().socket()) { + test(sock); + } + } + } + + private void test(DatagramSocket sock) throws Exception { + out.println("Testing with " + sock.getClass()); + InetAddress addr = loopbackAddress; + byte[] buf; + DatagramPacket p; + int port = serversock.getLocalPort(); + SocketAddress connectedAddress = serversock.getLocalSocketAddress(); + + out.println("Checking send to non-connected address ..."); + try { + out.println("Checking send with no packet address"); + buf = ("Hello, server"+(++i)).getBytes(); + p = new DatagramPacket(buf, buf.length); + sock.send(p); + throw new AssertionError("Expected IllegalArgumentException not received"); + } catch (IllegalArgumentException x) { + out.println("Got expected exception: " + x); + } + + out.println("Checking send to valid address"); + buf = ("Hello, server"+(++i)).getBytes(); + p = new DatagramPacket(buf, buf.length, addr, port); + sock.send(p); + serversock.receive(p); + + out.println("Connecting to server address: " + connectedAddress); + sock.connect(connectedAddress); + + try { + out.println("Checking send with different address than connected"); + buf = ("Hello, server"+(++i)).getBytes(); + p = new DatagramPacket(buf, buf.length, addr, port+1); + sock.send(p); + throw new AssertionError("Expected IllegalArgumentException not received"); + } catch (IllegalArgumentException x) { + out.println("Got expected exception: " + x); + } + + out.println("Checking send to valid address"); + buf = ("Hello, server"+(++i)).getBytes(); + p = new DatagramPacket(buf, buf.length, addr, port); + sock.send(p); + serversock.receive(p); + + if (sock instanceof MulticastSocket) { + sock.disconnect(); + testTTL((MulticastSocket)sock); + } + } + + private void testTTL(MulticastSocket sock) throws Exception { + out.println("Testing deprecated send TTL with " + sock.getClass()); + final byte ttl = 100; + InetAddress addr = loopbackAddress; + byte[] buf; + DatagramPacket p; + int port = serversock.getLocalPort(); + + out.println("Checking send to non-connected address ..."); + try { + out.println("Checking send with no packet address"); + buf = ("Hello, server"+(++i)).getBytes(); + p = new DatagramPacket(buf, buf.length); + sock.send(p,ttl); + throw new AssertionError("Expected IllegalArgumentException not received"); + } catch (IllegalArgumentException x) { + out.println("Got expected exception: " + x); + } + + out.println("Connecting to connected address: " + sock); + sock.connect(addr, port); + + try { + out.println("Checking send with different address than connected"); + buf = ("Hello, server"+(++i)).getBytes(); + p = new DatagramPacket(buf, buf.length, addr, port+1); + sock.send(p, ttl); + throw 
new AssertionError("Expected IllegalArgumentException not received"); + } catch (IllegalArgumentException x) { + out.println("Got expected exception: " + x); + } + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/net/DatagramSocket/SendDatagramToBadAddress.java --- a/test/jdk/java/net/DatagramSocket/SendDatagramToBadAddress.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/java/net/DatagramSocket/SendDatagramToBadAddress.java Mon Nov 18 12:40:06 2019 -0500 @@ -110,12 +110,17 @@ } public void run() throws Exception { - if (OSsupportsFeature()) { print ("running on OS that supports ICMP port unreachable"); } + try (DatagramSocket sock = new DatagramSocket()) { + test(sock); + } + } + + private void test(DatagramSocket sock) throws Exception { + print("Testing with " + sock.getClass()); InetAddress addr = InetAddress.getLoopbackAddress(); - DatagramSocket sock = new DatagramSocket(); DatagramSocket serversock = new DatagramSocket(0); DatagramPacket p; byte[] buf; diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/net/MulticastSocket/SetLoopbackMode.java --- a/test/jdk/java/net/MulticastSocket/SetLoopbackMode.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/java/net/MulticastSocket/SetLoopbackMode.java Mon Nov 18 12:40:06 2019 -0500 @@ -33,7 +33,6 @@ import java.net.*; import java.io.IOException; -import java.util.Enumeration; import jdk.test.lib.NetworkConfiguration; public class SetLoopbackMode { diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/net/MulticastSocket/SetLoopbackModeIPv4.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/net/MulticastSocket/SetLoopbackModeIPv4.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 4686717 + * @summary Test MulticastSocket.setLoopbackMode with IPv4 addresses + * @library /test/lib + * @build jdk.test.lib.NetworkConfiguration + * jdk.test.lib.Platform + * SetLoopbackMode + * SetLoopbackModeIPv4 + * @run main/othervm -Djava.net.preferIPv4Stack=true SetLoopbackModeIPv4 + */ + +import jdk.test.lib.net.IPSupport; + +public class SetLoopbackModeIPv4 { + public static void main(String[] args) throws Exception { + IPSupport.throwSkippedExceptionIfNonOperational(); + SetLoopbackMode.main(args); + } +} + + diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/net/SocketOption/AfterClose.java --- a/test/jdk/java/net/SocketOption/AfterClose.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/java/net/SocketOption/AfterClose.java Mon Nov 18 12:40:06 2019 -0500 @@ -34,8 +34,10 @@ import java.lang.reflect.Method; import java.net.DatagramSocket; import java.net.MulticastSocket; +import java.net.NetworkInterface; import java.net.ServerSocket; import java.net.Socket; +import java.net.SocketException; import java.net.SocketOption; import java.nio.channels.DatagramChannel; import java.nio.channels.ServerSocketChannel; @@ -45,6 +47,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import static java.lang.Boolean.*; @@ -57,9 +60,26 @@ static Map,List> OPTION_VALUES_MAP = optionValueMap(); + static boolean supportsMulticast(NetworkInterface ni) { + try { + return ni.supportsMulticast(); + } catch (SocketException e) { + return false; + } + } + + static List listNetworkInterfaces() { + try { + return NetworkInterface.networkInterfaces() + .filter(AfterClose::supportsMulticast) + .collect(Collectors.toList()); + } catch (Exception e) { } + return List.of(); + } + static Map,List> optionValueMap() { Map,List> map = new HashMap<>(); - map.put(IP_MULTICAST_IF, listOf(TRUE, FALSE) ); + map.put(IP_MULTICAST_IF, listNetworkInterfaces() ); map.put(IP_MULTICAST_LOOP, listOf(TRUE, FALSE) ); map.put(IP_MULTICAST_TTL, listOf(0, 100, 255) ); map.put(IP_TOS, listOf(0, 101, 255) ); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/security/Policy/ExtensiblePolicy/ExtensiblePolicyWithJarTest.java --- a/test/jdk/java/security/Policy/ExtensiblePolicy/ExtensiblePolicyWithJarTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/java/security/Policy/ExtensiblePolicy/ExtensiblePolicyWithJarTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,6 +69,7 @@ // create key pair for jar signing ProcessTools.executeCommand(KEYTOOL, "-genkey", + "-keyalg", "DSA", "-alias", ALIAS, "-keystore", KEYSTORE, "-storetype", "JKS", diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/java/security/Policy/SignedJar/SignedJarTest.java --- a/test/jdk/java/security/Policy/SignedJar/SignedJarTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/java/security/Policy/SignedJar/SignedJarTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -66,6 +66,7 @@ //Creating first key , keystore both.jks ProcessTools.executeCommand(KEYTOOL, "-genkey", + "-keyalg", "DSA", "-alias", "first", "-keystore", KEYSTORE1, "-keypass", PASSWORD, @@ -76,6 +77,7 @@ //Creating Second key, keystore both.jks ProcessTools.executeCommand(KEYTOOL, "-genkey", + "-keyalg", "DSA", // "-storetype","JKS", "-alias", "second", "-keystore", KEYSTORE1, diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java --- a/test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java Mon Nov 18 12:40:06 2019 -0500 @@ -47,12 +47,6 @@ * @run main/othervm -Xlog:class+unload -Xlog:gc -Xmx16m jdk.jfr.event.runtime.TestClassUnloadEvent */ -/** - * System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent is NOT set. - * If this flag is set G1 will never unload classes on System.gc(). - * As far as the "jfr" key guarantees no VM flags are set from the outside - * it should be enough with System.gc(). - */ public final class TestClassUnloadEvent { private final static String TEST_CLASS_NAME = "jdk.jfr.event.runtime.TestClasses"; private final static String EVENT_PATH = EventNames.ClassUnload; diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/jdk/nio/zipfs/CRCWriteTest.java --- a/test/jdk/jdk/nio/zipfs/CRCWriteTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/jdk/nio/zipfs/CRCWriteTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -57,10 +57,10 @@ * can be used successfully with the OutputStream write methods */ @Test - private void zipFsOsDeflatedWriteTest() throws Exception { + public void zipFsOsDeflatedWriteTest() throws Exception { Files.deleteIfExists(JAR_FILE); String[] msg = {"Hello ", "Tennis Anyone", "!!!!"}; - Entry e0 = Entry.of("Entry-0", ZipEntry.DEFLATED, Arrays.toString(msg)); + Entry e0 = Entry.of("Entry-0", ZipEntry.DEFLATED, String.join("",msg)); try (FileSystem zipfs = FileSystems.newFileSystem(JAR_FILE, Map.of("create", "true"))) { diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/jdk/nio/zipfs/ReleaseDeflater.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/jdk/nio/zipfs/ReleaseDeflater.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,96 @@ +/* + * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +/* + * @test + * @bug 8234011 + * @summary Check that jdk.nio.zipfs.ZipFileSystem doesn't cache more than ZipFileSystem.MAX_FLATER Inflater/Deflater objects + * @run main ReleaseDeflater + * @modules jdk.zipfs/jdk.nio.zipfs:+open + * @author Volker Simonis + */ + +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.reflect.Field; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.spi.FileSystemProvider; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +public class ReleaseDeflater { + public static void main(String[] args) throws Throwable { + Path zipFile = Paths.get("ReleaseDeflaterTest.zip"); + try (FileSystem fs = FileSystems.newFileSystem(zipFile, Map.of("create", true))) { + FileSystemProvider zprov = fs.provider(); + Path test = fs.getPath("test.txt"); + int STREAMS = 100; + List ostreams = new ArrayList<>(STREAMS); + List istreams = new ArrayList<>(STREAMS); + for (int i = 0; i < STREAMS; i++) { + OutputStream zos = zprov.newOutputStream(test); + ostreams.add(zos); + zos.write("Hello".getBytes()); + } + for (OutputStream os : ostreams) { + os.close(); + } + for (int i = 0; i < STREAMS; i++) { + InputStream zis = zprov.newInputStream(test); + istreams.add(zis); + } + for (InputStream is : istreams) { + is.close(); + } + try { + Field max_flaters = fs.getClass().getDeclaredField("MAX_FLATER"); + max_flaters.setAccessible(true); + int MAX_FLATERS = max_flaters.getInt(fs); + Field inflaters = fs.getClass().getDeclaredField("inflaters"); + inflaters.setAccessible(true); + int inflater_count = ((List) inflaters.get(fs)).size(); + if (inflater_count > MAX_FLATERS) { + throw new Exception("Too many inflaters " + inflater_count); + } + Field deflaters = fs.getClass().getDeclaredField("deflaters"); + deflaters.setAccessible(true); + int deflater_count = ((List) deflaters.get(fs)).size(); + if (deflater_count > MAX_FLATERS) { + throw new Exception("Too many deflaters " + deflater_count); + } + } catch (NoSuchFieldException nsfe) { + // Probably the implementation has changed, so there's not much we can do... + throw new RuntimeException("Implementation of jdk.nio.zipfs.ZipFileSystem changed - disable or fix the test"); + } + } finally { + Files.deleteIfExists(zipFile); + } + + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/security/infra/java/security/cert/CertPathValidator/certification/LuxTrustCA.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/security/infra/java/security/cert/CertPathValidator/certification/LuxTrustCA.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8232019 + * @summary Interoperability tests with LuxTrust Global Root 2 CA + * @build ValidatePathWithParams + * @run main/othervm -Djava.security.debug=certpath LuxTrustCA OCSP + * @run main/othervm -Djava.security.debug=certpath LuxTrustCA CRL + */ + +/* + * Obtain TLS test artifacts for LuxTrust CAs from: + * + * LuxTrust Global Root 2 CA sent test certificates as attachment + */ +public class LuxTrustCA { + + // Owner: CN=LuxTrust Global Qualified CA 3, O=LuxTrust S.A., C=LU + // Issuer: CN=LuxTrust Global Root 2, O=LuxTrust S.A., C=LU + // Serial number: 413dea1a28c2253845558e047f3e2a8b5b9baeae + // Valid from: Fri Mar 06 06:12:15 PST 2015 until: Mon Mar 05 05:21:57 PST 2035 + private static final String INT = "-----BEGIN CERTIFICATE-----\n" + + "MIIGcjCCBFqgAwIBAgIUQT3qGijCJThFVY4Efz4qi1ubrq4wDQYJKoZIhvcNAQEL\n" + + "BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV\n" + + "BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA2MTQxMjE1WhcNMzUw\n" + + "MzA1MTMyMTU3WjBOMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B\n" + + "LjEnMCUGA1UEAwweTHV4VHJ1c3QgR2xvYmFsIFF1YWxpZmllZCBDQSAzMIICIjAN\n" + + "BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuZ5iXSmFbP80gWb0kieYsImcyIo3\n" + + "QYg+XA3NlwH6QtI0PgZEG9dSo8pM7VMIzE5zq8tgJ50HnPdYflvfhkEKvAW2NuNX\n" + + "6hi/6HK4Nye+kB+INjpfAHmLft3GT95e+frk/t7hJNorK44xzqfWZKLNGysEHIri\n" + + "ddcePWOk3J/VMc9CsSemeZbmeZW1/xXeqolMS7JIDZ3+0DgVCYsKIK+b3sAQ8iqX\n" + + "bQlQyvymG6QyoQoJbuEP23iawRMWKNWk+sjzOkPAAQDtgEEVdggzzudLSM04C5Cj\n" + + "eLlLYuXgljler9bKRk9wW8nkareLZsn9uCDihGXGyC5m9jseGY1KAnlV8usLjBFA\n" + + "iW5OCnzcOg+CPsVucoRhS6uvXcu7VtHRGo5yLysJVv7sj6cx5lMvQKAMLviVi3kp\n" + + "hZKYfqVLAVFJpXTpunY2GayVGf/uOpzNoiSRpcxxYjmAlPKNeTgXVl5Mc0zojgT/\n" + + "MZTGFN7ov7n01yodN6OhfTADacvaKfj2C2CwdCJvMqvlUuCKrvuXbdZrtRm3BZXr\n" + + "ghGhuQmG0Tir7VVCI0WZjVjyHs2rpUcCQ6+D1WymKhzp0mrXdaFzYRce7FrEk69J\n" + + "WzWVp/9/GKnnb0//camavEaI4V64MVxYAir5AL/j7d4JIOqhPPU14ajxmC6dEH84\n" + + "guVs0Lo/dwVTUzsCAwEAAaOCAU4wggFKMBIGA1UdEwEB/wQIMAYBAf8CAQAwQwYD\n" + + "VR0gBDwwOjA4BggrgSsBAQEKAzAsMCoGCCsGAQUFBwIBFh5odHRwczovL3JlcG9z\n" + + "aXRvcnkubHV4dHJ1c3QubHUwagYIKwYBBQUHAQEEXjBcMCsGCCsGAQUFBzABhh9o\n" + + "dHRwOi8vbHRncm9vdC5vY3NwLmx1eHRydXN0Lmx1MC0GCCsGAQUFBzAChiFodHRw\n" + + "Oi8vY2EubHV4dHJ1c3QubHUvTFRHUkNBMi5jcnQwDgYDVR0PAQH/BAQDAgEGMB8G\n" + + "A1UdIwQYMBaAFP8YKHb5SAUsoa7xKxsrslP4S3yzMDMGA1UdHwQsMCowKKAmoCSG\n" + + "Imh0dHA6Ly9jcmwubHV4dHJ1c3QubHUvTFRHUkNBMi5jcmwwHQYDVR0OBBYEFGOP\n" + + "wosDsauO2FNHlh2ZqH32rKh1MA0GCSqGSIb3DQEBCwUAA4ICAQADB6M/edbOO9iJ\n" + + "COnVxayJ1NBk08/BVKlHwe7HBYAzT6Kmo3TbMUwOpcGI2e/NBCR3F4wTzXOVvFmv\n" + + "dBl7sdS6uMSLBTrav+5LChcFDBQj26X5VQDcXkA8b/u6J4Ve7CwoSesYg9H0fsJ3\n" + + "v12QrmGUUao9gbamKP1TFriO+XiIaDLYectruusRktIke9qy8MCpNSarZqr3oD3c\n" + + "/+N5D3lDlGpaz1IL8TpbubFEQHPCr6JiwR+qSqGRfxv8vIvOOAVxe7np5QhtwmCk\n" + + "XdMOPQ/XOOuEA06bez+zHkASX64at7dXru+4JUEbpijjMA+1jbFZr20OeBIQZL7o\n" + + "Est+FF8lFuvmucC9TS9QnlF28WJExvpIknjS7LhFMGXB9w380q38ZOuKjPZpoztY\n" + + "eyUpf8gxzV7fE5Q1okhnsDZ+12vBzBruzJcwtNuXyLyIh3fVN0LunVd+NP2kGjB2\n" + + "t9WD2Y0CaKxWx8snDdrSbAi46TpNoe04eroWgZOvdN0hEmf2d8tYBSJ/XZekU9sC\n" + + 
"Aww5vxHnXJi6CZHhjt8f1mMhyE2gBvmpk4CFetViO2sG0n/nsxCQNpnclsax/eJu\n" + + "XmGiZ3OPCIRijI5gy3pLRgnbgLyktWoOkmT/gxtWDLfVZwEt52JL8d550KIgttyR\n" + + "qX81LJWGSDdpnzeRVQEnzAt6+RebAQ==\n" + + "-----END CERTIFICATE-----"; + + // Owner: T=Private Person, SERIALNUMBER=00100978855105608536, + // GIVENNAME=TokenPRIActive, SURNAME=Test, CN=TokenPRIActive Test, C=DE + // Issuer: CN=LuxTrust Global Qualified CA 3, O=LuxTrust S.A., C=LU + // Serial number: 3814b6 + // Valid from: Wed Jul 10 04:36:12 PDT 2019 until: Sun Jul 10 04:36:12 PDT 2022 + private static final String VALID = "-----BEGIN CERTIFICATE-----\n" + + "MIIG/jCCBOagAwIBAgIDOBS2MA0GCSqGSIb3DQEBCwUAME4xCzAJBgNVBAYTAkxV\n" + + "MRYwFAYDVQQKDA1MdXhUcnVzdCBTLkEuMScwJQYDVQQDDB5MdXhUcnVzdCBHbG9i\n" + + "YWwgUXVhbGlmaWVkIENBIDMwHhcNMTkwNzEwMTEzNjEyWhcNMjIwNzEwMTEzNjEy\n" + + "WjCBizELMAkGA1UEBhMCREUxHDAaBgNVBAMTE1Rva2VuUFJJQWN0aXZlIFRlc3Qx\n" + + "DTALBgNVBAQTBFRlc3QxFzAVBgNVBCoTDlRva2VuUFJJQWN0aXZlMR0wGwYDVQQF\n" + + "ExQwMDEwMDk3ODg1NTEwNTYwODUzNjEXMBUGA1UEDBMOUHJpdmF0ZSBQZXJzb24w\n" + + "ggGiMA0GCSqGSIb3DQEBAQUAA4IBjwAwggGKAoIBgQDb8l2RJNS7iA9hJFj8aR25\n" + + "kpU/ZQTHl8Z9yrTLhr4VcMWMxqeOQUcUU27SgIuFvU9s/68OuaIhxyu6eohaGCLC\n" + + "wzFFRg8OlsUYuI1QtUEliIjmHOMDqSNIt093+SDV64osnHw5fpfy8V0zehEkd7QR\n" + + "t7Aq38ixCQyxCmNIDJeDCKJT+wwdLaKuw/4SEpR9sygSxZ3kG6kF4icsgYuiOCRx\n" + + "+DrS1wP9kcrQVWQ0bJbGzwxLZXCHaJsWE1Y17mQAO4Iv/9icqDkP3bZBU5GCgbNT\n" + + "JEP2GiUUPU3nL41Tlq03+iDmkS2bpWCtFZmTgUg+1nJEb7PSCJ9VcoflOOFgX/ku\n" + + "TQCJWwhsgyOneEZAg7PpzOj2msxA9RWI88FzRnX/zyjWEpdUCVJ85hFw8u+UZ7k1\n" + + "eF37oOpgNxQMJ+/ey7huneTzyhpFz/TqJpfMmwaGbPL6zmPLAMQalIPQj+68zlcX\n" + + "qyeKVbZU74Vm051kXb/3qs6CeUpT4HrY3UmHWLvOdNkCAwEAAaOCAiUwggIhMB8G\n" + + "A1UdIwQYMBaAFGOPwosDsauO2FNHlh2ZqH32rKh1MGYGCCsGAQUFBwEBBFowWDAn\n" + + "BggrBgEFBQcwAYYbaHR0cDovL3FjYS5vY3NwLmx1eHRydXN0Lmx1MC0GCCsGAQUF\n" + + "BzAChiFodHRwOi8vY2EubHV4dHJ1c3QubHUvTFRHUUNBMy5jcnQwggEuBgNVHSAE\n" + + "ggElMIIBITCCARMGC4g3AQOBKwEBCgMFMIIBAjAqBggrBgEFBQcCARYeaHR0cHM6\n" + + "Ly9yZXBvc2l0b3J5Lmx1eHRydXN0Lmx1MIHTBggrBgEFBQcCAjCBxgyBw0x1eFRy\n" + + "dXN0IENlcnRpZmljYXRlIG5vdCBvbiBTU0NEIGNvbXBsaWFudCB3aXRoIEVUU0kg\n" + + "VFMgMTAyIDA0MiBOQ1AgY2VydGlmaWNhdGUgcG9saWN5LiBLZXkgR2VuZXJhdGlv\n" + + "biBieSBDU1AuIFNvbGUgQXV0aG9yaXNlZCBVc2FnZTogU2lnbmF0dXJlLCBEYXRh\n" + + "IG9yIEVudGl0eSBBdXRoZW50aWNhdGlvbiBhbmQgRGF0YSBFbmNyeXB0aW9uLjAI\n" + + "BgYEAI96AQEwMwYDVR0fBCwwKjAooCagJIYiaHR0cDovL2NybC5sdXh0cnVzdC5s\n" + + "dS9MVEdRQ0EzLmNybDARBgNVHQ4ECgQISND+8GZyXrcwDgYDVR0PAQH/BAQDAgTw\n" + + "MAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIBAA54w2kGy+hJsYSyrQ5C\n" + + "ft0rasUHQviEiy31H2Z1lh4yEPLiuUsaepdzG4bov/J1RewX1fL7fvErraKK7nNr\n" + + "ioAXNElHtC0wfxGx0xGaCz7xsZIDFgpzyPqS+vd8VKbRCOY66AI+3aPiatCsk+BM\n" + + "Hp9GwW3B1e5EOgXiWVNxzYFtav5QSAj28IEV7ZuN2BIiU+phawRaoFy+4glMB7zE\n" + + "J5AM/Zfi50Q85ljy1kWUueFE3VNDafAUGOF5gTHvkKqj6LznUkqcT8m96Wd0IbF2\n" + + "BLYjnKPF6lGJsivErGqMwQIhlUUMkRQ13/hftL12rIiSjC1C/6cnbxOjWEOGnler\n" + + "Qn2zu2OTGnnrYxp/hojdZggb5Yt9mkM3EmyuqP1W4g0xtMv9q97swm/fHz/rDh8T\n" + + "MqrEOJzz284IM0DXjXq1wkmsZ/6/ueCyf0oBN0csvYspZKmLAydZ+jZmjdKKxX+N\n" + + "dreauHgOq1knLHkMb/YIyA+Oh6SBlNXL4Iae8APQcRGnylHQ1lc/YHTqWh8N1tmn\n" + + "no5r1kVJBYYtkI3oufaLtP7JIazteZlqTN+tubMJhO4xGgt6bqEpQiid9r3UnIjR\n" + + "esLYxXS5qRwSoOSleXT98H75+Ok1WR3ciD4exBR8/KcUtDITvDJhkBHnRHm40jFs\n" + + "5UbHFf98S6G9dqzsqW8+2Bpn\n" + + "-----END CERTIFICATE-----"; + + // Owner: T=Private Person, SERIALNUMBER=00100918135105608625, + // GIVENNAME=TokenPRIREV, SURNAME=Test, CN=TokenPRIREV Test, C=LU + // Issuer: CN=LuxTrust Global Qualified CA 
3, O=LuxTrust S.A., C=LU + // Serial number: 3814b8 + // Valid from: Wed Jul 10 04:36:48 PDT 2019 until: Sun Jul 10 04:36:48 PDT 2022 + private static final String REVOKED = "-----BEGIN CERTIFICATE-----\n" + + "MIIG+DCCBOCgAwIBAgIDOBS4MA0GCSqGSIb3DQEBCwUAME4xCzAJBgNVBAYTAkxV\n" + + "MRYwFAYDVQQKDA1MdXhUcnVzdCBTLkEuMScwJQYDVQQDDB5MdXhUcnVzdCBHbG9i\n" + + "YWwgUXVhbGlmaWVkIENBIDMwHhcNMTkwNzEwMTEzNjQ4WhcNMjIwNzEwMTEzNjQ4\n" + + "WjCBhTELMAkGA1UEBhMCTFUxGTAXBgNVBAMTEFRva2VuUFJJUkVWIFRlc3QxDTAL\n" + + "BgNVBAQTBFRlc3QxFDASBgNVBCoTC1Rva2VuUFJJUkVWMR0wGwYDVQQFExQwMDEw\n" + + "MDkxODEzNTEwNTYwODYyNTEXMBUGA1UEDBMOUHJpdmF0ZSBQZXJzb24wggGiMA0G\n" + + "CSqGSIb3DQEBAQUAA4IBjwAwggGKAoIBgQCcm7y4c/D58u6g3m6HGdfiqDXa2yEl\n" + + "H2cAeSb85fsAX08iXfa/U/kmFqqycwp2nsJdfor6HEEqHsmozyjjIWHDEsq+cUre\n" + + "SO6d2Ag29MrxsAWZ1XAol40FcxNN+yEL9Xs5doqqcbz3OoKdxkoWVdYq3D7peizF\n" + + "OER4M2XA0KSLiKXDapDCfTVLE6qRG6Cn5mqnlqbUtkI6vSsda5mWLSNe4Qw/PIMw\n" + + "v7ZDn5dHeHoV6UpZC95Ole5vMQfjAOsy4nRc1zofQz7iPw4ClNzDQSuonaAKSk3Y\n" + + "1KjWPmHshb6BoANL+ce1KuWESKV3D5lBkVVLTeoBkWQu7ViJviF2HE5UoPRSGijO\n" + + "nmGOTZRsjOJXPe7/pEq9SQ477EufnSsoCj1cPCtaowbsO7oswzV/axKMhhZf6nU7\n" + + "0wd9xUuMgMRKBfi026mYK7pdxJ85qE8qKlqeNprje+g1sjxMDbMHARA427Px0IUJ\n" + + "mzIJk0ysAQvbqQVe8QQM/f+PH3mUkXR02H8CAwEAAaOCAiUwggIhMB8GA1UdIwQY\n" + + "MBaAFGOPwosDsauO2FNHlh2ZqH32rKh1MGYGCCsGAQUFBwEBBFowWDAnBggrBgEF\n" + + "BQcwAYYbaHR0cDovL3FjYS5vY3NwLmx1eHRydXN0Lmx1MC0GCCsGAQUFBzAChiFo\n" + + "dHRwOi8vY2EubHV4dHJ1c3QubHUvTFRHUUNBMy5jcnQwggEuBgNVHSAEggElMIIB\n" + + "ITCCARMGC4g3AQOBKwEBCgMFMIIBAjAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBv\n" + + "c2l0b3J5Lmx1eHRydXN0Lmx1MIHTBggrBgEFBQcCAjCBxgyBw0x1eFRydXN0IENl\n" + + "cnRpZmljYXRlIG5vdCBvbiBTU0NEIGNvbXBsaWFudCB3aXRoIEVUU0kgVFMgMTAy\n" + + "IDA0MiBOQ1AgY2VydGlmaWNhdGUgcG9saWN5LiBLZXkgR2VuZXJhdGlvbiBieSBD\n" + + "U1AuIFNvbGUgQXV0aG9yaXNlZCBVc2FnZTogU2lnbmF0dXJlLCBEYXRhIG9yIEVu\n" + + "dGl0eSBBdXRoZW50aWNhdGlvbiBhbmQgRGF0YSBFbmNyeXB0aW9uLjAIBgYEAI96\n" + + "AQEwMwYDVR0fBCwwKjAooCagJIYiaHR0cDovL2NybC5sdXh0cnVzdC5sdS9MVEdR\n" + + "Q0EzLmNybDARBgNVHQ4ECgQIS0KUXpWyku0wDgYDVR0PAQH/BAQDAgTwMAwGA1Ud\n" + + "EwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIBAFSnezuyeRO0sh9e8/1N+2RE6Uhb\n" + + "RIdLKmaS8hMOyUNBapnHfJAdOn7j767qWQjRop5VNCcv0zDOxAqApxFiz4gJdzBY\n" + + "FVrEVwYos8a3BHLXNxfwIWEJ6EjlqI2qI3NjqK8m4M8LTq4G94V2/MOFVpXeCLju\n" + + "r0s+XZep2Sk9J4ofUOc8Gp7IZNhPzIlfKQ+KhnWovde4bpL3zRpp4u7Y580XsBuN\n" + + "kow2Eg84tRzSVizmgLPuRbySHuMo1jGIP7F9FdtOC8VVSjntfCXSEQqOvpH4YZ8S\n" + + "V4qP17CQHPWW1kOHAyXpkAjU+6SOlmF76Adv9nQFTZ6DAnKqiuxmi8EVCv96aFD7\n" + + "Ih+zBF7kj7fghPjUzsVdB6gI4VwuFCXEaAfWlxJS67s1hKnsCyqX3cu+Gnq9aRt+\n" + + "08iaTVEdrKL95AYYobVbnGJ7bH87SpenjLL+CDctXNNDlpJZ8eRYcQe+Q4dg+8L8\n" + + "X8tkXBeRbiZD1U7XwVBnKF6sJmhA4F/h/EJzwX0lp7EU6EO91bSiwD2NFVs+64UR\n" + + "9lftfFFm5In2N3vjDR/3nrCf3Jq9f0g7bTrNJmo+hc0+fD+zlAhZAx+ii2xE1cY1\n" + + "KLH2zXNzPUgIqYGdVQwn1TUFJN8JgGKsXwc+P51nEpgf6JVyK1m7EtVGtr9gF7DI\n" + + "P+4VSqTbTp4/l5n0\n" + + "-----END CERTIFICATE-----"; + + public static void main(String[] args) throws Exception { + + ValidatePathWithParams pathValidator = new ValidatePathWithParams(null); + + if (args.length >= 1 && "CRL".equalsIgnoreCase(args[0])) { + pathValidator.enableCRLCheck(); + } else { + // OCSP check by default + pathValidator.enableOCSPCheck(); + } + + // Validate valid + pathValidator.validate(new String[]{VALID, INT}, + ValidatePathWithParams.Status.GOOD, null, System.out); + + // Validate Revoked + pathValidator.validate(new String[]{REVOKED, INT}, + ValidatePathWithParams.Status.REVOKED, + "Wed Jul 10 04:48:49 PDT 
2019", System.out); + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/lib/cacerts/VerifyCACerts.java --- a/test/jdk/sun/security/lib/cacerts/VerifyCACerts.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/lib/cacerts/VerifyCACerts.java Mon Nov 18 12:40:06 2019 -0500 @@ -26,7 +26,7 @@ * @test * @bug 8189131 8198240 8191844 8189949 8191031 8196141 8204923 8195774 8199779 * 8209452 8209506 8210432 8195793 8216577 8222089 8222133 8222137 8222136 - * 8223499 8225392 + * 8223499 8225392 8232019 8234245 * @summary Check root CA entries in cacerts file */ import java.io.ByteArrayInputStream; @@ -52,12 +52,12 @@ + File.separator + "security" + File.separator + "cacerts"; // The numbers of certs now. - private static final int COUNT = 88; + private static final int COUNT = 89; // SHA-256 of cacerts, can be generated with // shasum -a 256 cacerts | sed -e 's/../&:/g' | tr '[:lower:]' '[:upper:]' | cut -c1-95 private static final String CHECKSUM - = "4E:21:94:7C:1D:49:28:BB:34:B0:40:DF:AE:19:B4:41:C6:B5:8A:EE:EB:D5:DE:B4:EF:07:AF:63:18:73:A6:FE"; + = "DE:71:94:6D:6C:5B:2A:AE:5C:AC:D1:3E:07:23:B6:43:CB:F7:32:69:32:04:36:9C:B4:11:78:6A:49:9D:C5:AB"; // map of cert alias to SHA-256 fingerprint @SuppressWarnings("serial") @@ -239,6 +239,8 @@ "DD:69:36:FE:21:F8:F0:77:C1:23:A1:A5:21:C1:22:24:F7:22:55:B7:3E:03:A7:26:06:93:E8:A2:4B:0F:A3:89"); put("globalsignrootcar6 [jdk]", "2C:AB:EA:FE:37:D0:6C:A2:2A:BA:73:91:C0:03:3D:25:98:29:52:C4:53:64:73:49:76:3A:3A:B5:AD:6C:CF:69"); + put("luxtrustglobalroot2ca [jdk]", + "54:45:5F:71:29:C2:0B:14:47:C4:18:F9:97:16:8F:24:C5:8F:C5:02:3B:F5:DA:5B:E2:EB:6E:1D:D8:90:2E:D5"); } }; @@ -268,6 +270,7 @@ if (!checksum.equals(CHECKSUM)) { atLeastOneFailed = true; System.err.println("ERROR: wrong checksum\n" + checksum); + System.err.println("Expected checksum\n" + CHECKSUM); } KeyStore ks = KeyStore.getInstance("JKS"); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/pkcs12/PBES2Encoding.java --- a/test/jdk/sun/security/pkcs12/PBES2Encoding.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/pkcs12/PBES2Encoding.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ // This is a PKCS 12 file using PBES2 to encrypt the cert and key. 
It is // generated with these commands: // - // keytool -keystore ks -genkeypair -storepass changeit -alias a -dname CN=A + // keytool -keystore ks -genkeypair -keyalg DSA -storepass changeit -alias a -dname CN=A // openssl pkcs12 -in ks -nodes -out kandc -passin pass:changeit // openssl pkcs12 -export -in kandc -out p12 -name a -passout pass:changeit // -certpbe AES-128-CBC -keypbe AES-128-CBC diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/pkcs12/ParamsTest.java --- a/test/jdk/sun/security/pkcs12/ParamsTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/pkcs12/ParamsTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -114,7 +114,8 @@ check("ksnormal", "a", "wrongpass", "-", IOException.class, "-", "-"); // Add a new entry with password-less settings, still has a storepass - keytool("-keystore ksnormal -genkeypair -storepass changeit -alias b -dname CN=b " + keytool("-keystore ksnormal -genkeypair -keyalg DSA " + + "-storepass changeit -alias b -dname CN=b " + "-J-Dkeystore.pkcs12.certProtectionAlgorithm=NONE " + "-J-Dkeystore.pkcs12.macAlgorithm=NONE"); data = Files.readAllBytes(Path.of("ksnormal")); @@ -146,7 +147,8 @@ check("ksnopass", "a", "wrongpass", "changeit", true, true, true); // Add a new entry with normal settings, still password-less - keytool("-keystore ksnopass -genkeypair -storepass changeit -alias b -dname CN=B"); + keytool("-keystore ksnopass -genkeypair -keyalg DSA " + + "-storepass changeit -alias b -dname CN=B"); data = Files.readAllBytes(Path.of("ksnopass")); shouldNotExist(data, "2"); // no Mac checkAlg(data, "110c010c01000", pbeWithSHA1AndRC4_128_oid); @@ -171,13 +173,15 @@ checkInt(data, "110c1101111", 6666); // cert ic // keypbe alg cannot be NONE - keytool("-keystore ksnewic -genkeypair -storepass changeit -alias b -dname CN=B " + keytool("-keystore ksnewic -genkeypair -keyalg DSA " + + "-storepass changeit -alias b -dname CN=B " + "-J-Dkeystore.pkcs12.keyProtectionAlgorithm=NONE") .shouldContain("NONE AlgorithmParameters not available") .shouldHaveExitValue(1); // new entry new keypbe alg (and default ic), else unchanged - keytool("-keystore ksnewic -genkeypair -storepass changeit -alias b -dname CN=B " + keytool("-keystore ksnewic -genkeypair -keyalg DSA " + + "-storepass changeit -alias b -dname CN=B " + "-J-Dkeystore.pkcs12.keyProtectionAlgorithm=PBEWithSHA1AndRC4_128"); data = Files.readAllBytes(Path.of("ksnewic")); checkInt(data, "22", 5555); // Mac ic @@ -336,7 +340,8 @@ // still prompt for keypass for genkeypair and certreq SecurityTools.setResponse("changeit", "changeit"); - keytool("-keystore ksnopassnew -genkeypair -alias a -dname CN=A " + keytool("-keystore ksnopassnew -genkeypair -keyalg DSA " + + "-alias a -dname CN=A " + "-J-Dkeystore.pkcs12.certProtectionAlgorithm=NONE " + "-J-Dkeystore.pkcs12.macAlgorithm=NONE") .shouldNotContain("Enter keystore password:") @@ -351,7 +356,8 @@ // params only read on demand // keyPbeIterationCount is used by -genkeypair - keytool("-keystore ksgenbadkeyic -genkeypair -alias a -dname CN=A " + keytool("-keystore ksgenbadkeyic -genkeypair -keyalg DSA " + + "-alias a -dname CN=A " + "-storepass changeit " + "-J-Dkeystore.pkcs12.keyPbeIterationCount=abc") .shouldContain("keyPbeIterationCount is not a number: abc") diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/pkcs12/SameDN.java --- a/test/jdk/sun/security/pkcs12/SameDN.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/pkcs12/SameDN.java Mon Nov 18 12:40:06 2019 -0500 @@ -55,7 +55,7 @@ } static void genkeypair(String alias, 
String dn) throws Exception { - keytool(COMMON + "-genkeypair -alias " + alias + " -dname " + dn) + keytool(COMMON + "-genkeypair -keyalg DSA -alias " + alias + " -dname " + dn) .shouldHaveExitValue(0); } diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/jarsigner/AltProvider.java --- a/test/jdk/sun/security/tools/jarsigner/AltProvider.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/jarsigner/AltProvider.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,7 +69,7 @@ MOD_SRC_DIR.toString()); // Create a keystore - tool("keytool", "-keystore x.jks -storetype jks -genkeypair" + + tool("keytool", "-keystore x.jks -storetype jks -genkeypair -keyalg dsa" + " -storepass changeit -keypass changeit -alias x -dname CN=X") .shouldHaveExitValue(0); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/jarsigner/JavaKeyStoreAliasCaseInsensitive.java --- a/test/jdk/sun/security/tools/jarsigner/JavaKeyStoreAliasCaseInsensitive.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/jarsigner/JavaKeyStoreAliasCaseInsensitive.java Mon Nov 18 12:40:06 2019 -0500 @@ -50,7 +50,7 @@ public void testAliasCase() throws Exception { final String KEYSTORE_OPTIONS = "-storetype JKS -keystore " + "test-alias-case.jks -storepass changeit"; - SecurityTools.keytool(KEYSTORE_OPTIONS + " -genkeypair" + SecurityTools.keytool(KEYSTORE_OPTIONS + " -genkeypair -keyalg DSA" + " -keypass changeit -alias " + ALIAS + " -dname CN=" + ALIAS) .shouldHaveExitValue(0); String jarFilename = "test-alias-case.jar"; @@ -88,10 +88,10 @@ // signed by another certificate associated with ALIAS + "1". final String KEYSTORE_OPTIONS = "-storetype JKS -keystore" + " test-alias-storeHash-case.jks -storepass changeit"; - SecurityTools.keytool(KEYSTORE_OPTIONS + " -genkeypair" + SecurityTools.keytool(KEYSTORE_OPTIONS + " -genkeypair -keyalg DSA" + " -keypass changeit -alias " + ALIAS + "1 -dname CN=" + ALIAS + "1").shouldHaveExitValue(0); - SecurityTools.keytool(KEYSTORE_OPTIONS + " -genkeypair" + SecurityTools.keytool(KEYSTORE_OPTIONS + " -genkeypair -keyalg DSA" + " -keypass changeit -alias " + ALIAS + "2 -dname CN=" + ALIAS + "2").shouldHaveExitValue(0); String certReq = SecurityTools.keytool(KEYSTORE_OPTIONS + diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/jarsigner/LineBrokenMultiByteCharacter.java --- a/test/jdk/sun/security/tools/jarsigner/LineBrokenMultiByteCharacter.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/jarsigner/LineBrokenMultiByteCharacter.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,7 @@ static void prepare() throws Exception { SecurityTools.keytool("-keystore", keystoreFileName, "-genkeypair", + "-keyalg", "dsa", "-storepass", "changeit", "-keypass", "changeit", "-storetype", "JKS", "-alias", alias, "-dname", "CN=X", "-validity", "366") .shouldHaveExitValue(0); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/jarsigner/multiRelease/MVJarSigningTest.java --- a/test/jdk/sun/security/tools/jarsigner/multiRelease/MVJarSigningTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/jarsigner/multiRelease/MVJarSigningTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,6 +152,7 @@ "-J-Duser.language=en", "-J-Duser.country=US", "-genkey", + "-keyalg", "dsa", "-alias", ALIAS, "-keystore", KEYSTORE, "-keypass", KEYPASS, diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/DeprecateKeyalg.java --- a/test/jdk/sun/security/tools/keytool/DeprecateKeyalg.java Wed Nov 13 17:21:31 2019 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -import jdk.test.lib.SecurityTools; -import jdk.test.lib.process.OutputAnalyzer; - -/** - * @test - * @bug 8212003 8214179 - * @summary Deprecating the default keytool -keyalg option - * @library /test/lib - */ - -public class DeprecateKeyalg { - - private static final String COMMON = "-keystore ks -storetype jceks " - + "-storepass changeit -keypass changeit"; - - public static void main(String[] args) throws Throwable { - - kt("-genkeypair -keyalg DSA -alias a -dname CN=A") - .shouldContain("Generating") - .shouldNotContain("-keyalg option must be specified"); - - kt("-genkeypair -alias b -dname CN=B") - .shouldContain("Generating") - .shouldContain("default key algorithm (DSA)") - .shouldContain("-keyalg option must be specified"); - - kt("-genseckey -keyalg DES -alias c") - .shouldContain("Generated") - .shouldNotContain("-keyalg option must be specified"); - - kt("-genseckey -alias d") - .shouldContain("Generated") - .shouldContain("default key algorithm (DES)") - .shouldContain("-keyalg option must be specified"); - - kt("-genkeypair -alias e -dname CN=e -keyalg EC -groupname brainpoolP256r1") - .shouldContain("Generating 256 bit EC (brainpoolP256r1) key pair"); - - kt("-genkeypair -alias f -dname CN=f -keyalg EC") - .shouldContain("Generating 256 bit EC (secp256r1) key pair"); - - kt("-genkeypair -alias g -dname CN=g -keyalg EC -keysize 384") - .shouldContain("Generating 384 bit EC (secp384r1) key pair"); - } - - private static OutputAnalyzer kt(String cmd) throws Throwable { - return SecurityTools.keytool(COMMON + " " + cmd) - .shouldHaveExitValue(0); - } -} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/DupImport.java --- a/test/jdk/sun/security/tools/keytool/DupImport.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/DupImport.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,9 +51,9 @@ Files.deleteIfExists(Paths.get("dup.ks")); // Create chain: root -> int -> me - run("-genkeypair -alias me -dname CN=Me"); - run("-genkeypair -alias int -dname CN=Int"); - run("-genkeypair -alias root -dname CN=Root"); + run("-genkeypair -keyalg DSA -alias me -dname CN=Me"); + run("-genkeypair -keyalg DSA -alias int -dname CN=Int"); + run("-genkeypair -keyalg DSA -alias root -dname CN=Root"); run("-certreq -alias int -file int.req"); run("-gencert -infile int.req -alias root -rfc -outfile int.resp"); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/HasSrcStoretypeOption.java --- a/test/jdk/sun/security/tools/keytool/HasSrcStoretypeOption.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/HasSrcStoretypeOption.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,7 @@ public class HasSrcStoretypeOption { public static void main(String[] args) throws Exception { - run("-genkeypair -alias a -dname CN=A -storetype jceks -keystore jce"); + run("-genkeypair -keyalg DSA -alias a -dname CN=A -storetype jceks -keystore jce"); // When there is no -srcstoretype, it should be probed from the file run("-importkeystore -srckeystore jce -destkeystore jks -deststoretype jks"); } diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/ImportPrompt.java --- a/test/jdk/sun/security/tools/keytool/ImportPrompt.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/ImportPrompt.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,11 +50,11 @@ public static void main(String[] args) throws Throwable { - kt("-keystore ks1 -genkeypair -alias a -dname CN=A"); + kt("-keystore ks1 -genkeypair -keyalg DSA -alias a -dname CN=A"); kt("-keystore ks1 -exportcert -alias a -file a.cert"); // Just create a keystore - kt("-keystore ks2 -genkeypair -alias b -dname CN=B"); + kt("-keystore ks2 -genkeypair -keyalg DSA -alias b -dname CN=B"); // no response text, assume no kt("-keystore ks2 -importcert -alias a -file a.cert"); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/KeyAlg.java --- a/test/jdk/sun/security/tools/keytool/KeyAlg.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/KeyAlg.java Mon Nov 18 12:40:06 2019 -0500 @@ -23,7 +23,7 @@ /* * @test - * @bug 8029659 + * @bug 8029659 8214179 * @summary Keytool, print key algorithm of certificate or key entry * @library /test/lib */ @@ -33,22 +33,25 @@ public class KeyAlg { public static void main(String[] args) throws Exception { - keytool("-genkeypair -alias ca -dname CN=CA -keyalg EC") - .shouldHaveExitValue(0); - keytool("-genkeypair -alias user -dname CN=User -keyalg RSA -keysize 1024") - .shouldHaveExitValue(0); - keytool("-certreq -alias user -file user.req").shouldHaveExitValue(0); + keytool("-genkeypair -alias ca -dname CN=CA -keyalg EC"); + keytool("-genkeypair -alias user -dname CN=User -keyalg RSA -keysize 1024"); + keytool("-certreq -alias user -file user.req"); keytool("-gencert -alias ca -rfc -sigalg SHA1withECDSA" - + " -infile user.req -outfile user.crt") - .shouldHaveExitValue(0); + + " -infile user.req -outfile user.crt"); keytool("-printcert -file user.crt") - .shouldHaveExitValue(0) .shouldMatch("Signature algorithm name:.*SHA1withECDSA") .shouldMatch("Subject Public Key Algorithm:.*1024.*RSA"); + keytool("-genkeypair -alias e -dname CN=e -keyalg EC -groupname brainpoolP256r1") + .shouldContain("Generating 256 bit EC (brainpoolP256r1) key pair"); + keytool("-genkeypair -alias f -dname CN=f -keyalg EC") + .shouldContain("Generating 256 bit EC (secp256r1) key pair"); + keytool("-genkeypair -alias g -dname CN=g -keyalg EC -keysize 384") + .shouldContain("Generating 384 bit EC (secp384r1) key pair"); } static OutputAnalyzer keytool(String s) throws Exception { return SecurityTools.keytool( - "-keystore ks -storepass changeit -keypass changeit " + s); + "-keystore ks -storepass changeit -keypass changeit " + s) + .shouldHaveExitValue(0); } } diff -r 
4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/KeyToolTest.java --- a/test/jdk/sun/security/tools/keytool/KeyToolTest.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/KeyToolTest.java Mon Nov 18 12:40:06 2019 -0500 @@ -197,7 +197,7 @@ // jarsigner and keytool algorithm for DSA keys". Unfortunately // SunPKCS11-NSS does not support SHA256withDSA yet. if (cmd.contains("p11-nss.txt") && cmd.contains("-genkey") - && !cmd.contains("-keyalg")) { + && cmd.contains("DSA")) { cmd += " -sigalg SHA1withDSA -keysize 1024"; } test(input, cmd); @@ -352,7 +352,7 @@ remove("x.jks"); remove("x.jks.p1.cert"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -alias p1 -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -alias p1 -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-exportcert -alias p1 -file x.jks.p1.cert"); ks = loadStore("x.jks", "changeit", "JKS"); @@ -377,7 +377,7 @@ // changealias and keyclone testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -alias p1 -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -alias p1 -dname CN=olala"); testOK("changeit\n", "-keystore x.jks -storetype JKS " + "-changealias -alias p1 -destalias p11"); testOK("changeit\n", "-keystore x.jks -storetype JKS " + @@ -396,10 +396,10 @@ remove("x.jceks"); // DES, no need keysize testOK("changeit\nchangeit\n\n", "-keystore x.jceks -storetype JCEKS " + - "-genseckey -alias s1"); + "-genseckey -keyalg DES -alias s1"); // DES, keysize cannot be 128 testFail("changeit\n\n", "-keystore x.jceks -storetype JCEKS " + - "-genseckey -alias s11 -keysize 128"); + "-genseckey -keyalg DES -alias s11 -keysize 128"); // DESede. no need keysize testOK("changeit\n\n", "-keystore x.jceks -storetype JCEKS " + "-genseckey -keyalg DESede -alias s2"); @@ -411,19 +411,20 @@ // about keypass // can accept storepass testOK("\n", "-keystore x.jceks -storetype JCEKS -storepass changeit " + - "-genseckey -alias s4"); + "-genseckey -keyalg DES -alias s4"); // or a new one testOK("keypass\nkeypass\n", "-keystore x.jceks -storetype JCEKS " + - "-storepass changeit -genseckey -alias s5"); + "-storepass changeit -genseckey -keyalg DES -alias s5"); // keypass must be valid (prompt 3 times) testOK("bad\n\bad\nkeypass\nkeypass\n", "-keystore x.jceks " + - "-storetype JCEKS -storepass changeit -genseckey -alias s6"); + "-storetype JCEKS -storepass changeit -genseckey " + + "-keyalg DES -alias s6"); // keypass must be valid (prompt 3 times) testFail("bad\n\bad\nbad\n", "-keystore x.jceks -storetype JCEKS " + - "-storepass changeit -genseckey -alias s7"); + "-storepass changeit -genseckey -keyalg DES -alias s7"); // keypass must be valid (prompt 3 times) testFail("bad\n\bad\nbad\nkeypass\n", "-keystore x.jceks " + - "-storetype JCEKS -storepass changeit -genseckey -alias s7"); + "-storetype JCEKS -storepass changeit -genseckey -keyalg DES -alias s7"); ks = loadStore("x.jceks", "changeit", "JCEKS"); assertTrue(ks.getKey("s1", "changeit".toCharArray()) .getAlgorithm().equalsIgnoreCase("DES"), "s1 is DES"); @@ -452,7 +453,7 @@ remove("x.jceks"); // create 2 entries... 
testOK("changeit\nchangeit\n\n", "-keystore x.jceks -storetype JCEKS " + - "-genkeypair -alias p1 -dname CN=Olala"); + "-genkeypair -keyalg DSA -alias p1 -dname CN=Olala"); testOK("", "-keystore x.jceks -storetype JCEKS -storepass changeit " + "-importcert -alias c1 -file x.jks.p1.cert -noprompt"); ks = loadStore("x.jceks", "changeit", "JCEKS"); @@ -532,7 +533,7 @@ remove("x.jks"); // generate entry with different keypass testOK("changeit\nkeypass\nkeypass\n", "-keystore x.jceks " + - "-storetype JCEKS -genkeypair -alias p2 -dname CN=Olala"); + "-storetype JCEKS -genkeypair -keyalg DSA -alias p2 -dname CN=Olala"); // prompt testOK("changeit\nchangeit\nchangeit\nkeypass\n", "-importkeystore " + "-srckeystore x.jceks -srcstoretype JCEKS " + @@ -581,10 +582,10 @@ remove("x.jks"); // create SecretKeyEntry testOK("changeit\n\n", "-keystore x.jceks -storetype JCEKS " + - "-genseckey -alias s1"); + "-genseckey -keyalg DES -alias s1"); // create SecretKeyEntry testOK("changeit\n\n", "-keystore x.jceks -storetype JCEKS " + - "-genseckey -alias s2"); + "-genseckey -keyalg DES -alias s2"); // remove the keypass!=storepass one testOK("changeit\n", "-keystore x.jceks -storetype JCEKS " + "-delete -alias p2"); @@ -629,13 +630,13 @@ remove("x.jks"); // just type ENTER means keypass=storepass testOK("changeit\nchangeit\n\n", "-keystore x.jks -storetype JKS " + - "-genkeypair -alias p1 -dname CN=olala"); + "-genkeypair -keyalg DSA -alias p1 -dname CN=olala"); remove("x.p12"); // PKCS12 only need storepass testOK("", "-keystore x.p12 -storetype PKCS12 -storepass changeit " + - "-genkeypair -alias p0 -dname CN=olala"); + "-genkeypair -keyalg DSA -alias p0 -dname CN=olala"); testOK("changeit\n", "-keystore x.p12 -storetype PKCS12 " + - "-genkeypair -alias p1 -dname CN=olala"); + "-genkeypair -keyalg DSA -alias p1 -dname CN=olala"); // when specify keypass, make sure keypass==storepass... testOK("changeit\n", "-keystore x.p12 -keypass changeit " + "-storetype PKCS12 -genkeypair -keyalg DSA -alias p3 -dname CN=olala"); @@ -658,9 +659,9 @@ remove("x.p12"); // PKCS12 only need storepass testOK("", "-keystore x.p12 -storetype PKCS12 -storepass changeit " + - "-genkeypair -alias p0 -dname CN=olala"); + "-genkeypair -keyalg DSA -alias p0 -dname CN=olala"); testOK("", "-storepass changeit -keystore x.p12 -storetype PKCS12 " + - "-genkeypair -alias p1 -dname CN=olala"); + "-genkeypair -keyalg DSA -alias p1 -dname CN=olala"); // when specify keypass, make sure keypass==storepass... 
testOK("", "-storepass changeit -keystore x.p12 -keypass changeit " + "-storetype PKCS12 -genkeypair -keyalg DSA -alias p3 -dname CN=olala"); @@ -696,14 +697,14 @@ "BEFORE THIS TEST ***"); testOK("", p11Arg + - "-storepass test12 -genkeypair -alias p1 -dname CN=olala"); - testOK("test12\n", p11Arg + "-genkeypair -alias p2 -dname CN=olala2"); + "-storepass test12 -genkeypair -keyalg DSA -alias p1 -dname CN=olala"); + testOK("test12\n", p11Arg + "-genkeypair -keyalg DSA -alias p2 -dname CN=olala2"); // cannot provide keypass for PKCS11 testFail("test12\n", p11Arg + - "-keypass test12 -genkeypair -alias p3 -dname CN=olala3"); + "-keypass test12 -genkeypair -keyalg DSA -alias p3 -dname CN=olala3"); // cannot provide keypass for PKCS11 testFail("test12\n", p11Arg + - "-keypass nonsense -genkeypair -alias p3 -dname CN=olala3"); + "-keypass nonsense -genkeypair -keyalg DSA -alias p3 -dname CN=olala3"); testOK("", p11Arg + "-storepass test12 -list"); assertTrue(out.indexOf("Your keystore contains 2 entries") != -1, @@ -738,8 +739,8 @@ KeyStore ks; testOK("", p11Arg + - "-storepass test12 -genkeypair -alias p1 -dname CN=olala"); - testOK("test12\n", p11Arg + "-genkeypair -alias p2 -dname CN=olala2"); + "-storepass test12 -genkeypair -keyalg DSA -alias p1 -dname CN=olala"); + testOK("test12\n", p11Arg + "-genkeypair -keyalg DSA -alias p2 -dname CN=olala2"); // test importkeystore for pkcs11 remove("x.jks"); @@ -809,7 +810,7 @@ KeyStore ks; remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-exportcert -file x.jks.p1.cert"); /* deleted */ testOK("", "-keystore x.jks -storetype JKS " + @@ -842,7 +843,7 @@ void sqeKeyclonetest() throws Exception { remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); // new pass testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-keypass changeit -new newpass -keyclone -dest p0"); @@ -871,7 +872,7 @@ void sqeKeypasswdTest() throws Exception { remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-keypass changeit -keypasswd -new newpass"); /*change back*/ testOK("", "-keystore x.jks -storetype JKS " + @@ -909,7 +910,7 @@ testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-delete -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass keypass -genkeypair -dname CN=olala"); + "-keypass keypass -genkeypair -keyalg DSA -dname CN=olala"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + "-keypasswd -new newpass"); testOK("keypass\n", "-keystore x.jks -storetype JKS " + @@ -922,7 +923,7 @@ void sqeListTest() throws Exception { remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit -list"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-list -alias mykey"); @@ -948,7 +949,7 @@ void 
sqeSelfCertTest() throws Exception { remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit -selfcert"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-keypass changeit -selfcert"); @@ -974,7 +975,7 @@ // diff pass remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass keypass -genkeypair -dname CN=olala"); + "-keypass keypass -genkeypair -keyalg DSA -dname CN=olala"); testFail("", "-keystore x.jks -storetype JKS " + "-storepass changeit -selfcert"); testOK("keypass\n", "-keystore x.jks -storetype JKS " + @@ -995,7 +996,7 @@ void sqeStorepassTest() throws Exception { remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); // all in arg testOK("", "-storepasswd -keystore x.jks -storetype JKS " + "-storepass changeit -new newstore"); @@ -1044,13 +1045,13 @@ remove("x.jks"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -alias newentry"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -alias newentry"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -alias newentry"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -alias newentry"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-keypass changeit -genkeypair -dname CN=olala -keyalg DSA " + "-alias n1"); @@ -1061,19 +1062,19 @@ "-keypass changeit -genkeypair -dname CN=olala " + "-keyalg NoSuchAlg -alias n3"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -keysize 56 " + + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -keysize 56 " + "-alias n4"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -keysize 999 " + + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -keysize 999 " + "-alias n5"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -keysize 512 " + + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -keysize 512 " + "-alias n6"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -keysize 1024 " + + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -keysize 1024 " + "-alias n7"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala " + + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala " + "-sigalg NoSuchAlg -alias n8"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-keypass changeit -genkeypair -dname CN=olala -keyalg RSA " + @@ -1088,12 +1089,12 @@ "-keypass changeit 
-genkeypair -dname CN=olala -keyalg RSA " + "-sigalg NoSuchAlg -alias n12"); testFail("", "-keystore badkeystore -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala " + + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala " + "-alias n14"); testFail("", "-keystore x.jks -storetype JKS -storepass badpass " + - "-keypass changeit -genkeypair -dname CN=olala -alias n16"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -alias n16"); testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CNN=olala -alias n17"); + "-keypass changeit -genkeypair -keyalg DSA -dname CNN=olala -alias n17"); remove("x.jks"); } @@ -1103,7 +1104,7 @@ testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + "-export -file mykey.cert -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-export -file mykey.cert -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + @@ -1131,11 +1132,11 @@ testFail("", "-keystore x.jks -storetype JKS -storepass changeit " + "-delete -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-delete -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); // keystore name illegal testFail("", "-keystore aa\\bb//cc\\dd -storepass changeit " + "-delete -alias mykey"); @@ -1157,7 +1158,7 @@ remove("csr1"); // PrivateKeyEntry can do certreq testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala -keysize 1024"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala -keysize 1024"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-certreq -file csr1 -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + @@ -1221,7 +1222,7 @@ remove("mykey.cert"); remove("myweakkey.cert"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + - "-keypass changeit -genkeypair -dname CN=olala"); + "-keypass changeit -genkeypair -keyalg DSA -dname CN=olala"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + "-export -file mykey.cert -alias mykey"); testOK("", "-keystore x.jks -storetype JKS -storepass changeit " + @@ -1264,7 +1265,7 @@ remove("x.jks"); String simple = "-keystore x.jks -storetype JKS -storepass changeit " + "-keypass changeit -noprompt -keyalg " + keyAlg + " "; - String pre = simple + "-genkeypair -dname CN=Olala -alias "; + String pre = simple + "-genkeypair -keyalg DSA -dname CN=Olala -alias "; // Version and SKID testOK("", pre + "o1"); @@ -1678,30 +1679,30 @@ remove("x.jks"); testOK("", "-help"); - // 2. keytool -genkey -v -keysize 512 Enter "a" for the keystore + // 2. keytool -genkey -keyalg DSA -v -keysize 512 Enter "a" for the keystore // password. Check error (password too short). Enter "password" for // the keystore password. Hit 'return' for "first and last name", // "organizational unit", "City", "State", and "Country Code". 
// Type "yes" when they ask you if everything is correct. // Type 'return' for new key password. testOK("a\npassword\npassword\nMe\nHere\nNow\nPlace\nPlace\nUS\nyes\n\n", - "-genkey -v -keysize 512 -keystore x.jks -storetype JKS"); + "-genkey -keyalg DSA -v -keysize 512 -keystore x.jks -storetype JKS"); // 3. keytool -list -v -storepass password testOK("", "-list -v -storepass password -keystore x.jks -storetype JKS"); // 4. keytool -list -v Type "a" for the keystore password. // Check error (wrong keystore password). testFail("a\n", "-list -v -keystore x.jks -storetype JKS"); assertTrue(ex.indexOf("password was incorrect") != -1); - // 5. keytool -genkey -v -keysize 512 Enter "password" as the password. + // 5. keytool -genkey -keyalg DSA -v -keysize 512 Enter "password" as the password. // Check error (alias 'mykey' already exists). - testFail("password\n", "-genkey -v -keysize 512" + + testFail("password\n", "-genkey -keyalg DSA -v -keysize 512" + " -keystore x.jks -storetype JKS"); assertTrue(ex.indexOf("alias already exists") != -1); - // 6. keytool -genkey -v -keysize 512 -alias mykey2 -storepass password + // 6. keytool -genkey -keyalg DSA -v -keysize 512 -alias mykey2 -storepass password // Hit 'return' for "first and last name", "organizational unit", "City", // "State", and "Country Code". Type "yes" when they ask you if // everything is correct. Type 'return' for new key password. - testOK("\n\n\n\n\n\nyes\n\n", "-genkey -v -keysize 512 -alias mykey2" + + testOK("\n\n\n\n\n\nyes\n\n", "-genkey -keyalg DSA -v -keysize 512 -alias mykey2" + " -storepass password -keystore x.jks -storetype JKS"); // 7. keytool -list -v Type 'password' for the store password. testOK("password\n", "-list -v -keystore x.jks -storetype JKS"); @@ -1810,7 +1811,7 @@ void sszzTest() throws Exception { testAnyway("", NSS_P11_ARG+"-delete -alias nss -storepass test12"); testAnyway("", NZZ_P11_ARG+"-delete -alias nss -storepass test12"); - testOK("", NSS_P11_ARG+"-genkeypair -dname CN=NSS " + + testOK("", NSS_P11_ARG+"-genkeypair -keyalg DSA -dname CN=NSS " + "-alias nss -storepass test12"); testOK("", NSS_SRC_P11_ARG + NZZ_P11_ARG + "-importkeystore -srcstorepass test12 -deststorepass test12"); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/PKCS12Passwd.java --- a/test/jdk/sun/security/tools/keytool/PKCS12Passwd.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/PKCS12Passwd.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,11 +49,11 @@ public static void main(String[] args) throws Exception { // A PrivateKeyEntry - kt("-genkeypair -alias a -dname CN=A") + kt("-genkeypair -alias a -dname CN=A -keyalg DSA") .shouldHaveExitValue(0); // A TrustedCertificateEntry (genkeypair, export, delete, import) - kt("-genkeypair -alias b -dname CN=B") + kt("-genkeypair -alias b -dname CN=B -keyalg DSA") .shouldHaveExitValue(0); kt("-exportcert -alias b -file b.cert") .shouldHaveExitValue(0); @@ -90,7 +90,7 @@ // A PKCS12 keystore can be loaded as a JKS, and it follows JKS rules // which means the storepass and keypass can be changed separately! 
- ktFull("-genkeypair -alias a -dname CN=A -storetype pkcs12 " + ktFull("-genkeypair -alias a -dname CN=A -storetype pkcs12 -keyalg DSA " + "-storepass changeit -keypass changeit -keystore p12") .shouldHaveExitValue(0); @@ -112,7 +112,7 @@ // PKCS12 rules that both passwords are changed at the same time and // some commands are rejected. - ktFull("-genkeypair -alias a -dname CN=A -storetype jks " + ktFull("-genkeypair -alias a -dname CN=A -storetype jks -keyalg DSA " + "-storepass changeit -keypass changeit -keystore jks") .shouldHaveExitValue(0); diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/ProbingFailure.java --- a/test/jdk/sun/security/tools/keytool/ProbingFailure.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/ProbingFailure.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,7 @@ public static void main(String[] args) throws Exception { // genkeypair - kt("-genkeypair -keystore mks -alias a -dname CN=A -storetype MYKS") + kt("-genkeypair -keystore mks -alias a -dname CN=A -keyalg DSA -storetype MYKS") .shouldHaveExitValue(0); // list diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/RealType.java --- a/test/jdk/sun/security/tools/keytool/RealType.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/RealType.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ public static void main(String[] args) throws Throwable { - kt("-genkeypair -alias a -dname CN=A -keypass changeit -storetype jks") + kt("-genkeypair -keyalg DSA -alias a -dname CN=A -keypass changeit -storetype jks") .shouldHaveExitValue(0); // -keypasswd command should be allowed on JKS @@ -54,7 +54,7 @@ Files.delete(Paths.get("ks")); - kt("-genkeypair -alias a -dname CN=A -keypass changeit -storetype pkcs12") + kt("-genkeypair -keyalg DSA -alias a -dname CN=A -keypass changeit -storetype pkcs12") .shouldHaveExitValue(0); // A pkcs12 keystore cannot be loaded as a JCEKS keystore diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/RemoveKeyAlgDefault.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/sun/security/tools/keytool/RemoveKeyAlgDefault.java Mon Nov 18 12:40:06 2019 -0500 @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import jdk.test.lib.SecurityTools; +import jdk.test.lib.process.OutputAnalyzer; + +/** + * @test + * @bug 8212003 8214024 + * @summary Deprecating the default keytool -keyalg option + * @library /test/lib + */ + +public class RemoveKeyAlgDefault { + + private static final String COMMON = "-keystore ks -storetype jceks " + + "-storepass changeit -keypass changeit"; + + public static void main(String[] args) throws Throwable { + + kt("-genkeypair -keyalg DSA -alias a -dname CN=A") + .shouldHaveExitValue(0) + .shouldContain("Generating") + .shouldNotContain("-keyalg option must be specified"); + + kt("-genkeypair -alias b -dname CN=B") + .shouldHaveExitValue(1) + .shouldContain("-keyalg option must be specified"); + + kt("-genseckey -keyalg DES -alias c") + .shouldHaveExitValue(0) + .shouldContain("Generated") + .shouldNotContain("-keyalg option must be specified"); + + kt("-genseckey -alias d") + .shouldHaveExitValue(1) + .shouldContain("-keyalg option must be specified"); + } + + private static OutputAnalyzer kt(String cmd) throws Throwable { + return SecurityTools.keytool(COMMON + " " + cmd); + } +} diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/WeakAlg.java --- a/test/jdk/sun/security/tools/keytool/WeakAlg.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/WeakAlg.java Mon Nov 18 12:40:06 2019 -0500 @@ -178,7 +178,7 @@ // no warning if all certs kt("-importcert -alias b -file a.crt -storetype jks -noprompt") .shouldNotContain("Warning:"); - kt("-genkeypair -alias a -dname CN=A") + kt("-genkeypair -keyalg DSA -alias a -dname CN=A") .shouldContain("JKS keystore uses a proprietary format"); kt("-list") .shouldContain("JKS keystore uses a proprietary format"); @@ -202,7 +202,7 @@ rm("ks"); - kt("-genkeypair -alias a -dname CN=A -storetype jceks") + kt("-genkeypair -keyalg DSA -alias a -dname CN=A -storetype jceks") .shouldContain("JCEKS keystore uses a proprietary format"); kt("-list") .shouldContain("JCEKS keystore uses a proprietary format"); @@ -239,7 +239,7 @@ static void checkInplaceImportKeyStore() throws Exception { rm("ks"); - genkeypair("a", ""); + genkeypair("a", "-keyalg DSA"); // Same type backup importkeystore("ks", "ks", "") diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/console.sh --- a/test/jdk/sun/security/tools/keytool/console.sh Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/console.sh Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ echo "==========================================" echo rm $KS 2> /dev/null -$J5/bin/keytool -keystore $KS -genkey -dname CN=olala -storepass $PASSW || exit 1 +$J5/bin/keytool -keystore $KS -genkey -keyalg DSA -dname CN=olala -storepass $PASSW || exit 1 $JM/bin/keytool -keystore $KS -list -storepass $PASSW || exit 2 echo "==========================================" @@ -84,7 +84,7 @@ echo rm $KS 2> /dev/null -$JM/bin/keytool -keystore $KS -genkey -dname CN=olala -storepass $PASSW || exit 3 +$JM/bin/keytool -keystore $KS -genkey -keyalg DSA -dname CN=olala -storepass $PASSW || exit 3 $J5/bin/keytool -keystore $KS -list -storepass $PASSW || exit 4 echo "============================================================" @@ -93,7 +93,7 @@ echo rm $KS 2> /dev/null -$J5/bin/keytool -keystore $KS -genkey -dname CN=olala || exit 5 +$J5/bin/keytool -keystore $KS -genkey -keyalg DSA -dname CN=olala || exit 5 $JM/bin/keytool -keystore $KS -list || exit 6 echo $PASSW| $J5/bin/keytool -keystore $KS -list || exit 7 echo $PASSW| $JM/bin/keytool -keystore $KS -list || exit 8 @@ -104,7 +104,7 @@ echo rm $KS 2> /dev/null -$JM/bin/keytool -keystore $KS -genkey -dname CN=olala || exit 9 +$JM/bin/keytool -keystore $KS -genkey -keyalg DSA -dname CN=olala || exit 9 $J5/bin/keytool -keystore $KS -list || exit 10 echo $PASSW| $JM/bin/keytool -keystore $KS -list || exit 11 echo $PASSW| $J5/bin/keytool -keystore $KS -list || exit 12 @@ -115,7 +115,7 @@ echo rm $KS 2> /dev/null -echo $PASSW| $J5/bin/keytool -keystore $KS -genkey -dname CN=olala || exit 13 +echo $PASSW| $J5/bin/keytool -keystore $KS -genkey -keyalg DSA -dname CN=olala || exit 13 $JM/bin/keytool -keystore $KS -list || exit 14 echo $PASSW| $J5/bin/keytool -keystore $KS -list || exit 15 echo $PASSW| $JM/bin/keytool -keystore $KS -list || exit 16 diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/jdk/sun/security/tools/keytool/i18n.html --- a/test/jdk/sun/security/tools/keytool/i18n.html Wed Nov 13 17:21:31 2019 -0500 +++ b/test/jdk/sun/security/tools/keytool/i18n.html Mon Nov 18 12:40:06 2019 -0500 @@ -9,7 +9,7 @@ If you are on a Windows platform, delete the .keystore file in your home directory.
  • keytool -help -
  • keytool -genkey -v -keysize 512 +
  • keytool -genkey -keyalg DSA -v -keysize 512 Enter "a" for the keystore password. Check error (password too short). Enter "password" for the keystore password. Re-enter "password" to confirm. @@ -21,10 +21,10 @@
  • keytool -list -v Type "a" for the keystore password. Check error (wrong keystore password). -
  • keytool -genkey -v -keysize 512 +
  • keytool -genkey -keyalg DSA -v -keysize 512 Enter "password" as the password. Check error (alias 'mykey' already exists). -
  • keytool -genkey -v -keysize 512 -alias mykey2 -storepass password +
  • keytool -genkey -keyalg DSA -v -keysize 512 -alias mykey2 -storepass password Hit 'return' for "first and last name", "organizational unit", "organization", "City", "State", and "Country Code". Type "yes" when they ask you if everything is correct. diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/langtools/tools/javac/api/TestModuleUnnamedPackage.java --- a/test/langtools/tools/javac/api/TestModuleUnnamedPackage.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/langtools/tools/javac/api/TestModuleUnnamedPackage.java Mon Nov 18 12:40:06 2019 -0500 @@ -25,7 +25,8 @@ * @test * @bug 8234025 * @summary Elements.getPackageElement(ModuleElement,CharSequence) returns null for unnamed package - * @modules jdk.compiler + * @modules jdk.compiler/com.sun.tools.javac.api + * jdk.compiler/com.sun.tools.javac.main * @library /tools/lib /tools/javac/lib * @build toolbox.ModuleBuilder toolbox.ToolBox * @run main TestModuleUnnamedPackage diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/langtools/tools/javac/processing/model/TestSourceVersion.java --- a/test/langtools/tools/javac/processing/model/TestSourceVersion.java Wed Nov 13 17:21:31 2019 -0500 +++ b/test/langtools/tools/javac/processing/model/TestSourceVersion.java Mon Nov 18 12:40:06 2019 -0500 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,7 @@ /* * @test - * @bug 7025809 8028543 6415644 8028544 8029942 8187951 8193291 8196551 + * @bug 7025809 8028543 6415644 8028544 8029942 8187951 8193291 8196551 8233096 * @summary Test latest, latestSupported, underscore as keyword, etc. * @author Joseph D. Darcy * @modules java.compiler @@ -31,11 +31,12 @@ */ import java.util.*; +import java.util.function.Predicate; import javax.lang.model.SourceVersion; import static javax.lang.model.SourceVersion.*; /** - * Verify latest[Supported] behavior. + * Verify behavior of latest[Supported] and other methods. */ public class TestSourceVersion { public static void main(String... 
args) { @@ -43,6 +44,7 @@ testVersionVaryingKeywords(); testRestrictedKeywords(); testVar(); + testYield(); } private static void testLatestSupported() { @@ -52,8 +54,10 @@ SourceVersion latestSupported = SourceVersion.latestSupported(); if (latest == last && - latestSupported == SourceVersion.valueOf("RELEASE_" + Runtime.version().feature()) && - (latest == latestSupported || (latest.ordinal() - latestSupported.ordinal() == 1)) ) + latestSupported == SourceVersion.valueOf("RELEASE_" + + Runtime.version().feature()) && + (latest == latestSupported || + (latest.ordinal() - latestSupported.ordinal() == 1)) ) return; else { throw new RuntimeException("Unexpected release value(s) found:\n" + @@ -73,14 +77,14 @@ String key = entry.getKey(); SourceVersion value = entry.getValue(); - check(true, isKeyword(key), "keyword", latest()); - check(false, isName(key), "name", latest()); + check(true, key, (String s) -> isKeyword(s), "keyword", latest()); + check(false, key, (String s) -> isName(s), "name", latest()); for(SourceVersion version : SourceVersion.values()) { boolean isKeyword = version.compareTo(value) >= 0; - check(isKeyword, isKeyword(key, version), "keyword", version); - check(!isKeyword, isName(key, version), "name", version); + check(isKeyword, key, (String s) -> isKeyword(s, version), "keyword", version); + check(!isKeyword, key, (String s) -> isName(s, version), "name", version); } } } @@ -98,31 +102,47 @@ Set.of("open", "module", "requires", "transitive", "exports", "opens", "to", "uses", "provides", "with"); - for(String key : restrictedKeywords) { - for(SourceVersion version : SourceVersion.values()) { - check(false, isKeyword(key, version), "keyword", version); - check(true, isName(key, version), "name", version); + for (String key : restrictedKeywords) { + for (SourceVersion version : SourceVersion.values()) { + check(false, key, (String s) -> isKeyword(s, version), "keyword", version); + check(true, key, (String s) -> isName(s, version), "name", version); } } } private static void testVar() { + for (SourceVersion version : SourceVersion.values()) { + Predicate<String> isKeywordVersion = (String s) -> isKeyword(s, version); + Predicate<String> isNameVersion = (String s) -> isName(s, version); - for(SourceVersion version : SourceVersion.values()) { - check(false, isKeyword("var", version), "keyword", version); - check(false, isKeyword("foo.var", version), "keyword", version); - check(false, isKeyword("var.foo", version), "keyword", version); - - check(true, isName("var", version), "name", version); - check(true, isName("foo.var", version), "name", version); - check(true, isName("var.foo", version), "name", version); + for (String name : List.of("var", "foo.var", "var.foo")) { + check(false, name, isKeywordVersion, "keyword", version); + check(true, name, isNameVersion, "name", version); + } } } - private static void check(boolean result, boolean expected, - String message, SourceVersion version) { + private static void testYield() { + for (SourceVersion version : SourceVersion.values()) { + Predicate<String> isKeywordVersion = (String s) -> isKeyword(s, version); + Predicate<String> isNameVersion = (String s) -> isName(s, version); + + for (String name : List.of("yield", "foo.yield", "yield.foo")) { + check(false, name, isKeywordVersion, "keyword", version); + check(true, name, isNameVersion, "name", version); + } + } + } + + private static void check(boolean expected, + String input, + Predicate<String> predicate, + String message, + SourceVersion version) { + boolean result = predicate.test(input); if (result != 
expected) { - throw new RuntimeException("Unexpected " + message + "-ness of _ on " + version); + throw new RuntimeException("Unexpected " + message + "-ness of " + input + + " on " + version); } } } diff -r 4d58a35f3cfa -r 4ad81e9e30fd test/lib/sun/hotspot/WhiteBox.java