# HG changeset patch
# User duke
# Date 1499279113 -7200
# Node ID a46e5922bb400f90ed5ae9b31bd9b80dda2ad338
# Parent  17efac395638494091b058b6a5075cedb73984c1
# Parent  89640d85c8cdf036825262b5707a63daa5370017
Merge

diff -r 17efac395638 -r a46e5922bb40 .hgtags-top-repo
--- a/.hgtags-top-repo Thu Mar 19 16:13:40 2015 -0700
+++ b/.hgtags-top-repo Wed Jul 05 20:25:13 2017 +0200
@@ -297,3 +297,4 @@
 1822e59f17121b09e7899cf338cfb6e37fe5fceb jdk9-b52
 d6ed47125a76cd1cf8a100568507bfb5e9669d9f jdk9-b53
 cb7367141e910e265b8344a8facee740bd1e5467 jdk9-b54
+0c37a832458f0e0b7d2a3f1a6f69aeae311aeb18 jdk9-b55
diff -r 17efac395638 -r a46e5922bb40 common/autoconf/configure.ac
--- a/common/autoconf/configure.ac Thu Mar 19 16:13:40 2015 -0700
+++ b/common/autoconf/configure.ac Wed Jul 05 20:25:13 2017 +0200
@@ -54,6 +54,7 @@
 
 AC_DEFUN_ONCE([CUSTOM_EARLY_HOOK])
 AC_DEFUN_ONCE([CUSTOM_LATE_HOOK])
+AC_DEFUN_ONCE([CUSTOM_CONFIG_OUTPUT_GENERATED_HOOK])
 AC_DEFUN_ONCE([CUSTOM_SUMMARY_AND_WARNINGS_HOOK])
 
 # This line needs to be here, verbatim, after all includes and the dummy hook
@@ -265,6 +266,7 @@
 
 # Create the actual output files. Now the main work of configure is done.
 AC_OUTPUT
+CUSTOM_CONFIG_OUTPUT_GENERATED_HOOK
 
 # Try to move the config.log file to the output directory.
 if test -e ./config.log; then
diff -r 17efac395638 -r a46e5922bb40 common/autoconf/flags.m4
--- a/common/autoconf/flags.m4 Thu Mar 19 16:13:40 2015 -0700
+++ b/common/autoconf/flags.m4 Wed Jul 05 20:25:13 2017 +0200
@@ -481,9 +481,8 @@
     CFLAGS_JDKLIB_EXTRA="${CFLAGS_JDKLIB_EXTRA} -errtags=yes -errfmt"
     CXXFLAGS_JDKLIB_EXTRA="${CXXFLAGS_JDKLIB_EXTRA} -errtags=yes -errfmt"
   elif test "x$TOOLCHAIN_TYPE" = xxlc; then
-    LDFLAGS_JDK="${LDFLAGS_JDK} -q64 -brtl -bnolibpath -liconv -bexpall"
-    CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
-    CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
+    CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt"
+    CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt"
   fi
 
   if test "x$CFLAGS" != "x${ADDED_CFLAGS}"; then
@@ -762,6 +761,8 @@
   elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
     LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
     LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
+  elif test "x$TOOLCHAIN_TYPE" = xxlc; then
+    LDFLAGS_JDK="${LDFLAGS_JDK} -brtl -bnolibpath -liconv -bexpall -bernotok"
   fi
 
   # Customize LDFLAGS for executables
diff -r 17efac395638 -r a46e5922bb40 common/autoconf/generated-configure.sh
--- a/common/autoconf/generated-configure.sh Thu Mar 19 16:13:40 2015 -0700
+++ b/common/autoconf/generated-configure.sh Wed Jul 05 20:25:13 2017 +0200
@@ -4362,13 +4362,14 @@
 
 
 
+
 # This line needs to be here, verbatim, after all includes and the dummy hook
 # definitions. It is replaced with custom functionality when building
 # custom sources.
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1425994551
+DATE_WHEN_GENERATED=1426774983
 
 ###############################################################################
 #
@@ -41173,7 +41174,7 @@
 
   # jtreg win32 script works for everybody
-  JTREGEXE="$JT_HOME/win32/bin/jtreg"
+  JTREGEXE="$JT_HOME/bin/jtreg"
 
   if test ! -f "$JTREGEXE"; then
    as_fn_error $? "JTReg executable does not exist: $JTREGEXE" "$LINENO" 5
@@ -42372,9 +42373,8 @@
     CFLAGS_JDKLIB_EXTRA="${CFLAGS_JDKLIB_EXTRA} -errtags=yes -errfmt"
     CXXFLAGS_JDKLIB_EXTRA="${CXXFLAGS_JDKLIB_EXTRA} -errtags=yes -errfmt"
   elif test "x$TOOLCHAIN_TYPE" = xxlc; then
-    LDFLAGS_JDK="${LDFLAGS_JDK} -q64 -brtl -bnolibpath -liconv -bexpall"
-    CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
-    CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -q64 -qfullpath -qsaveopt"
+    CFLAGS_JDK="${CFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt"
+    CXXFLAGS_JDK="${CXXFLAGS_JDK} -qchars=signed -qfullpath -qsaveopt"
   fi
 
   if test "x$CFLAGS" != "x${ADDED_CFLAGS}"; then
@@ -42668,6 +42668,8 @@
   elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
     LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext"
     LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib"
+  elif test "x$TOOLCHAIN_TYPE" = xxlc; then
+    LDFLAGS_JDK="${LDFLAGS_JDK} -brtl -bnolibpath -liconv -bexpall -bernotok"
   fi
 
   # Customize LDFLAGS for executables
@@ -52969,6 +52971,7 @@
   fi
 
+
 # Try to move the config.log file to the output directory.
 if test -e ./config.log; then
   $MV -f ./config.log "$OUTPUT_ROOT/config.log" 2> /dev/null
diff -r 17efac395638 -r a46e5922bb40 common/autoconf/toolchain.m4
--- a/common/autoconf/toolchain.m4 Thu Mar 19 16:13:40 2015 -0700
+++ b/common/autoconf/toolchain.m4 Wed Jul 05 20:25:13 2017 +0200
@@ -763,7 +763,7 @@
     BASIC_FIXUP_PATH([JT_HOME])
 
     # jtreg win32 script works for everybody
-    JTREGEXE="$JT_HOME/win32/bin/jtreg"
+    JTREGEXE="$JT_HOME/bin/jtreg"
 
     if test ! -f "$JTREGEXE"; then
       AC_MSG_ERROR([JTReg executable does not exist: $JTREGEXE])
diff -r 17efac395638 -r a46e5922bb40 corba/.hgtags
--- a/corba/.hgtags Thu Mar 19 16:13:40 2015 -0700
+++ b/corba/.hgtags Wed Jul 05 20:25:13 2017 +0200
@@ -297,3 +297,4 @@
 b8538bbb6f224ab1dabba579137099c166ad4724 jdk9-b52
 aadc16ca5ab7d56f92ef9dbfa443595a939241b4 jdk9-b53
 d469c5ad0c763e325a78e0af3016878a57dfc5cc jdk9-b54
+734ca5311a225711b79618f3e92f47f07c82154a jdk9-b55
diff -r 17efac395638 -r a46e5922bb40 hotspot/.hgtags
--- a/hotspot/.hgtags Thu Mar 19 16:13:40 2015 -0700
+++ b/hotspot/.hgtags Wed Jul 05 20:25:13 2017 +0200
@@ -457,3 +457,4 @@
 9fb7fdc554db5be5c5b10f88f529ec3b870c44e3 jdk9-b52
 effd5ef0c3eb4bb85aa975c489d6761dbf13ad6a jdk9-b53
 c3b117fa5bdedfafd9ed236403e6d406911195b1 jdk9-b54
+be49ab55e5c498c5077bbf58c2737100d1992339 jdk9-b55
diff -r 17efac395638 -r a46e5922bb40 hotspot/agent/src/os/linux/libproc.h
--- a/hotspot/agent/src/os/linux/libproc.h Thu Mar 19 16:13:40 2015 -0700
+++ b/hotspot/agent/src/os/linux/libproc.h Wed Jul 05 20:25:13 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include #include "proc_service.h" -#if defined(arm) || defined(ppc) +#ifdef ALT_SASRCDIR #include "libproc_md.h" #endif diff -r 17efac395638 -r a46e5922bb40 hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Wed Jul 05 20:25:13 2017 +0200 @@ -51,6 +51,9 @@ private static final int C_INT32_SIZE = 4; private static final int C_INT64_SIZE = 8; private static int pointerSize = UNINITIALIZED_SIZE; + // Counter to ensure read loops terminate: + private static final int MAX_DUPLICATE_DEFINITIONS = 100; + private int duplicateDefCount = 0; private static final boolean DEBUG; static { @@ -166,6 +169,10 @@ typeEntrySizeOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySizeOffset"); typeEntryArrayStride = getLongValueFromProcess("gHotSpotVMTypeEntryArrayStride"); + if (typeEntryArrayStride == 0L) { + throw new RuntimeException("zero stride: cannot read types."); + } + // Start iterating down it until we find an entry with no name Address typeNameAddr = null; do { @@ -192,7 +199,11 @@ } entryAddr = entryAddr.addOffsetTo(typeEntryArrayStride); - } while (typeNameAddr != null); + } while (typeNameAddr != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS); + + if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) { + throw new RuntimeException("too many duplicate definitions"); + } } private void initializePrimitiveTypes() { @@ -395,6 +406,10 @@ structEntryAddressOffset = getLongValueFromProcess("gHotSpotVMStructEntryAddressOffset"); structEntryArrayStride = getLongValueFromProcess("gHotSpotVMStructEntryArrayStride"); + if (structEntryArrayStride == 0L) { + throw new RuntimeException("zero stride: cannot read types."); + } + // Fetch the address of the VMStructEntry* Address entryAddr = lookupInProcess("gHotSpotVMStructs"); // Dereference this once to get the pointer to the first VMStructEntry @@ -472,6 +487,11 @@ intConstantEntryValueOffset = getLongValueFromProcess("gHotSpotVMIntConstantEntryValueOffset"); intConstantEntryArrayStride = getLongValueFromProcess("gHotSpotVMIntConstantEntryArrayStride"); + if (intConstantEntryArrayStride == 0L) { + throw new RuntimeException("zero stride: cannot read types."); + } + + // Fetch the address of the VMIntConstantEntry* Address entryAddr = lookupInProcess("gHotSpotVMIntConstants"); // Dereference this once to get the pointer to the first VMIntConstantEntry @@ -501,12 +521,17 @@ } else { System.err.println("Warning: the int constant \"" + name + "\" (declared in the remote VM in VMStructs::localHotSpotVMIntConstants) " + "had its value declared as " + value + " twice. 
Continuing."); + duplicateDefCount++; } } } entryAddr = entryAddr.addOffsetTo(intConstantEntryArrayStride); - } while (nameAddr != null); + } while (nameAddr != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS); + + if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) { + throw new RuntimeException("too many duplicate definitions"); + } } private void readVMLongConstants() { @@ -519,6 +544,10 @@ longConstantEntryValueOffset = getLongValueFromProcess("gHotSpotVMLongConstantEntryValueOffset"); longConstantEntryArrayStride = getLongValueFromProcess("gHotSpotVMLongConstantEntryArrayStride"); + if (longConstantEntryArrayStride == 0L) { + throw new RuntimeException("zero stride: cannot read types."); + } + // Fetch the address of the VMLongConstantEntry* Address entryAddr = lookupInProcess("gHotSpotVMLongConstants"); // Dereference this once to get the pointer to the first VMLongConstantEntry @@ -548,12 +577,17 @@ } else { System.err.println("Warning: the long constant \"" + name + "\" (declared in the remote VM in VMStructs::localHotSpotVMLongConstants) " + "had its value declared as " + value + " twice. Continuing."); + duplicateDefCount++; } } } entryAddr = entryAddr.addOffsetTo(longConstantEntryArrayStride); - } while (nameAddr != null); + } while (nameAddr != null && duplicateDefCount < MAX_DUPLICATE_DEFINITIONS); + + if (duplicateDefCount >= MAX_DUPLICATE_DEFINITIONS) { + throw new RuntimeException("too many duplicate definitions."); + } } private BasicType lookupOrFail(String typeName) { @@ -740,9 +774,10 @@ } if (!typeNameIsPointerType(typeName)) { - System.err.println("Warning: the type \"" + typeName + "\" (declared in the remote VM in VMStructs::localHotSpotVMTypes) " + - "had its size declared as " + size + " twice. Continuing."); - } + System.err.println("Warning: the type \"" + typeName + "\" (declared in the remote VM in VMStructs::localHotSpotVMTypes) " + + "had its size declared as " + size + " twice. Continuing."); + duplicateDefCount++; + } } } diff -r 17efac395638 -r a46e5922bb40 hotspot/make/aix/makefiles/rules.make --- a/hotspot/make/aix/makefiles/rules.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/aix/makefiles/rules.make Wed Jul 05 20:25:13 2017 +0200 @@ -126,10 +126,17 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 JAVAC_FLAGS = -g -encoding ascii -BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) + +# Prefer BOOT_JDK_SOURCETARGET if it's set (typically by the top build system) +# Fall back to the values here if it's not set (hotspot only builds) +ifeq ($(BOOT_JDK_SOURCETARGET),) +BOOTSTRAP_SOURCETARGET := -source 8 -target 8 +else +BOOTSTRAP_SOURCETARGET := $(BOOT_JDK_SOURCETARGET) +endif + +BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) $(BOOTSTRAP_SOURCETARGET) # With parallel makes, print a message at the end of compilation. 
ifeq ($(findstring j,$(MFLAGS)),j) diff -r 17efac395638 -r a46e5922bb40 hotspot/make/bsd/makefiles/rules.make --- a/hotspot/make/bsd/makefiles/rules.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/bsd/makefiles/rules.make Wed Jul 05 20:25:13 2017 +0200 @@ -126,10 +126,17 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 JAVAC_FLAGS = -g -encoding ascii -BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) + +# Prefer BOOT_JDK_SOURCETARGET if it's set (typically by the top build system) +# Fall back to the values here if it's not set (hotspot only builds) +ifeq ($(BOOT_JDK_SOURCETARGET),) +BOOTSTRAP_SOURCETARGET := -source 8 -target 8 +else +BOOTSTRAP_SOURCETARGET := $(BOOT_JDK_SOURCETARGET) +endif + +BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) $(BOOTSTRAP_SOURCETARGET) # With parallel makes, print a message at the end of compilation. ifeq ($(findstring j,$(MFLAGS)),j) diff -r 17efac395638 -r a46e5922bb40 hotspot/make/defs.make --- a/hotspot/make/defs.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/defs.make Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -286,7 +286,7 @@ # Use uname output for SRCARCH, but deal with platform differences. If ARCH # is not explicitly listed below, it is treated as x86. - SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 aarch64 zero,$(ARCH))) + SRCARCH ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 aarch64 zero,$(ARCH))) ARCH/ = x86 ARCH/sparc = sparc ARCH/sparc64= sparc @@ -295,12 +295,11 @@ ARCH/x86_64 = x86 ARCH/ppc64 = ppc ARCH/ppc = ppc - ARCH/arm = arm ARCH/aarch64= aarch64 ARCH/zero = zero # BUILDARCH is usually the same as SRCARCH, except for sparcv9 - BUILDARCH = $(SRCARCH) + BUILDARCH ?= $(SRCARCH) ifeq ($(BUILDARCH), x86) ifdef LP64 BUILDARCH = amd64 @@ -320,7 +319,7 @@ endif # LIBARCH is 1:1 mapping from BUILDARCH - LIBARCH = $(LIBARCH/$(BUILDARCH)) + LIBARCH ?= $(LIBARCH/$(BUILDARCH)) LIBARCH/i486 = i386 LIBARCH/amd64 = amd64 LIBARCH/sparc = sparc @@ -328,8 +327,6 @@ LIBARCH/ia64 = ia64 LIBARCH/ppc64 = ppc64 LIBARCH/aarch64 = aarch64 - LIBARCH/ppc = ppc - LIBARCH/arm = arm LIBARCH/zero = $(ZERO_LIBARCH) LP64_ARCH = sparcv9 amd64 ia64 ppc64 aarch64 zero @@ -347,6 +344,8 @@ # includes this make/defs.make file. MAKE_ARGS += HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) +MAKE_ARGS += BOOT_JDK_SOURCETARGET="$(BOOT_JDK_SOURCETARGET)" + # Various export sub directories EXPORT_INCLUDE_DIR = $(EXPORT_PATH)/include EXPORT_DOCS_DIR = $(EXPORT_PATH)/docs diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/arm.make --- a/hotspot/make/linux/makefiles/arm.make Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -# -# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. 
-# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -Obj_Files += linux_arm.o - -ifneq ($(EXT_LIBS_PATH),) - LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a -endif - -CFLAGS += -DVM_LITTLE_ENDIAN diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/buildtree.make --- a/hotspot/make/linux/makefiles/buildtree.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/linux/makefiles/buildtree.make Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -70,6 +70,8 @@ PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).suncc else PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH) + ALT_PLATFORM_FILE = $(HS_ALT_MAKE)/$(OS_FAMILY)/platform_$(BUILDARCH) + PLATFORM_FILE := $(if $(wildcard $(ALT_PLATFORM_FILE)),$(ALT_PLATFORM_FILE),$(PLATFORM_FILE)) endif endif @@ -203,7 +205,7 @@ $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \ + echo "Platform_file = $(PLATFORM_FILE)" | sed -e 's|$(HS_ALT_MAKE)|$$(HS_ALT_MAKE)|' -e 's|$(GAMMADIR)|$$(GAMMADIR)|'; \ sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \ echo; \ echo "GAMMADIR = $(GAMMADIR)"; \ diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/defs.make --- a/hotspot/make/linux/makefiles/defs.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/linux/makefiles/defs.make Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -105,14 +105,6 @@ HS_ARCH = x86 endif -# ARM -ifeq ($(ARCH), arm) - ARCH_DATA_MODEL = 32 - PLATFORM = linux-arm - VM_PLATFORM = linux_arm - HS_ARCH = arm -endif - # PPC # Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but # 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here! @@ -121,10 +113,6 @@ MAKE_ARGS += LP64=1 PLATFORM = linux-ppc64 VM_PLATFORM = linux_ppc64 - else - ARCH_DATA_MODEL = 32 - PLATFORM = linux-ppc - VM_PLATFORM = linux_ppc endif HS_ARCH = ppc diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/gcc.make --- a/hotspot/make/linux/makefiles/gcc.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/linux/makefiles/gcc.make Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. 
+# Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -176,11 +176,7 @@ ARCHFLAG/ia64 = ARCHFLAG/sparc = -m32 -mcpu=v9 ARCHFLAG/sparcv9 = -m64 -mcpu=v9 -ARCHFLAG/arm = -fsigned-char ARCHFLAG/zero = $(ZERO_ARCHFLAG) -ifndef E500V2 -ARCHFLAG/ppc = -mcpu=powerpc -endif ARCHFLAG/ppc64 = -m64 CFLAGS += $(ARCHFLAG) @@ -188,10 +184,6 @@ LFLAGS += $(ARCHFLAG) ASFLAGS += $(ARCHFLAG) -ifdef E500V2 -CFLAGS += -DE500V2 -endif - # Use C++ Interpreter ifdef CC_INTERP CFLAGS += -DCC_INTERP @@ -391,3 +383,5 @@ ifndef USE_SUNCC CFLAGS += -fno-omit-frame-pointer endif + +-include $(HS_ALT_MAKE)/linux/makefiles/gcc.make diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/ppc.make --- a/hotspot/make/linux/makefiles/ppc.make Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,33 +0,0 @@ -# -# Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized -OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT) - -# Must also specify if CPU is big endian -CFLAGS += -DVM_BIG_ENDIAN - -ifdef E500V2 -ASFLAGS += -Wa,-mspe -Wa,--defsym -Wa,E500V2=1 -endif diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/rules.make --- a/hotspot/make/linux/makefiles/rules.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/linux/makefiles/rules.make Wed Jul 05 20:25:13 2017 +0200 @@ -126,10 +126,17 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 JAVAC_FLAGS = -g -encoding ascii -BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) + +# Prefer BOOT_JDK_SOURCETARGET if it's set (typically by the top build system) +# Fall back to the values here if it's not set (hotspot only builds) +ifeq ($(BOOT_JDK_SOURCETARGET),) +BOOTSTRAP_SOURCETARGET := -source 8 -target 8 +else +BOOTSTRAP_SOURCETARGET := $(BOOT_JDK_SOURCETARGET) +endif + +BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) $(BOOTSTRAP_SOURCETARGET) # With parallel makes, print a message at the end of compilation. 
ifeq ($(findstring j,$(MFLAGS)),j) diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/saproc.make --- a/hotspot/make/linux/makefiles/saproc.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/linux/makefiles/saproc.make Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -69,19 +69,21 @@ endif ifneq ($(ALT_SASRCDIR),) -ALT_SAINCDIR=-I$(ALT_SASRCDIR) +ALT_SAINCDIR=-I$(ALT_SASRCDIR) -DALT_SASRCDIR else ALT_SAINCDIR= endif SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE) +SAARCH ?= $(BUILDARCH) + $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE) $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ exit 1; \ fi @echo $(LOG_INFO) Making SA debugger back-end... - $(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE \ + $(QUIETLY) $(CC) -D$(SAARCH) -D_GNU_SOURCE \ -D_FILE_OFFSET_BITS=64 \ $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \ -I$(SASRCDIR) \ diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/makefiles/vm.make --- a/hotspot/make/linux/makefiles/vm.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/linux/makefiles/vm.make Wed Jul 05 20:25:13 2017 +0200 @@ -45,8 +45,9 @@ ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true) include $(MAKEFILES_DIR)/zeroshark.make else - include $(MAKEFILES_DIR)/$(BUILDARCH).make - -include $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make + BUILDARCH_MAKE = $(MAKEFILES_DIR)/$(BUILDARCH).make + ALT_BUILDARCH_MAKE = $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make + include $(if $(wildcard $(ALT_BUILDARCH_MAKE)),$(ALT_BUILDARCH_MAKE),$(BUILDARCH_MAKE)) endif # set VPATH so make knows where to look for source files diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/platform_arm --- a/hotspot/make/linux/platform_arm Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,17 +0,0 @@ -os_family = linux - -arch = arm - -arch_model = arm - -os_arch = linux_arm - -os_arch_model = linux_arm - -lib_arch = arm - -compiler = gcc - -gnu_dis_arch = arm - -sysdefs = -DLINUX -D_GNU_SOURCE -DARM diff -r 17efac395638 -r a46e5922bb40 hotspot/make/linux/platform_ppc --- a/hotspot/make/linux/platform_ppc Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,17 +0,0 @@ -os_family = linux - -arch = ppc - -arch_model = ppc_32 - -os_arch = linux_ppc - -os_arch_model = linux_ppc_32 - -lib_arch = ppc - -compiler = gcc - -gnu_dis_arch = ppc - -sysdefs = -DLINUX -D_GNU_SOURCE -DPPC32 diff -r 17efac395638 -r a46e5922bb40 hotspot/make/solaris/makefiles/rules.make --- a/hotspot/make/solaris/makefiles/rules.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/solaris/makefiles/rules.make Wed Jul 05 20:25:13 2017 +0200 @@ -118,10 +118,17 @@ RUN.JAR$(MAKE_VERBOSE) += >/dev/null # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION = 6 -BOOT_TARGET_CLASS_VERSION = 6 JAVAC_FLAGS = -g -encoding ascii -BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) + +# Prefer BOOT_JDK_SOURCETARGET if it's set (typically by the top build system) +# Fall back to the values here if it's not set (hotspot only builds) +ifeq 
($(BOOT_JDK_SOURCETARGET),) +BOOTSTRAP_SOURCETARGET := -source 8 -target 8 +else +BOOTSTRAP_SOURCETARGET := $(BOOT_JDK_SOURCETARGET) +endif + +BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) $(BOOTSTRAP_SOURCETARGET) # With parallel makes, print a message at the end of compilation. ifeq ($(findstring j,$(MFLAGS)),j) diff -r 17efac395638 -r a46e5922bb40 hotspot/make/windows/makefiles/rules.make --- a/hotspot/make/windows/makefiles/rules.make Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/make/windows/makefiles/rules.make Wed Jul 05 20:25:13 2017 +0200 @@ -44,10 +44,17 @@ !endif # Settings for javac -BOOT_SOURCE_LANGUAGE_VERSION=6 -BOOT_TARGET_CLASS_VERSION=6 JAVAC_FLAGS=-g -encoding ascii -BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) + +# Prefer BOOT_JDK_SOURCETARGET if it's set (typically by the top build system) +# Fall back to the values here if it's not set (hotspot only builds) +!ifndef BOOT_JDK_SOURCETARGET +BOOTSTRAP_SOURCETARGET=-source 8 -target 8 +!else +BOOTSTRAP_SOURCETARGET=$(BOOT_JDK_SOURCETARGET) +!endif + +BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) $(BOOTSTRAP_SOURCETARGET) # VS2012 and VS2013 loads VS10 projects just fine (and will # upgrade them automatically to VS2012 format). diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -2776,7 +2776,6 @@ __ stop("unexpected profiling mismatch"); __ bind(ok); - __ pop(tmp); } #endif // first time here. Set profile type. diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -872,7 +872,7 @@ // stack grows down, caller passes positive offset assert(offset > 0, "must bang with negative offset"); mov(rscratch2, -offset); - ldr(zr, Address(sp, rscratch2)); + str(zr, Address(sp, rscratch2)); } // Writes to stack successive pages until offset reached to check for diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -858,7 +858,7 @@ const int page_size = os::vm_page_size(); for (int pages = start_page; pages <= StackShadowPages ; pages++) { __ sub(rscratch2, sp, pages*page_size); - __ ldr(zr, Address(rscratch2)); + __ str(zr, Address(rscratch2)); } } } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -3947,12 +3947,10 @@ void LIR_Assembler::membar_acquire() { // No x86 machines currently require load fences - // __ load_fence(); } void LIR_Assembler::membar_release() { // No x86 machines currently require store fences - // __ store_fence(); } void LIR_Assembler::membar_loadload() { diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/x86/vm/templateTable_x86.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -0,0 +1,4235 @@ +/* + 
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.hpp" +#include "interpreter/interpreter.hpp" +#include "interpreter/interpreterRuntime.hpp" +#include "interpreter/interp_masm.hpp" +#include "interpreter/templateTable.hpp" +#include "memory/universe.inline.hpp" +#include "oops/methodData.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/oop.inline.hpp" +#include "prims/methodHandles.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" +#include "runtime/synchronizer.hpp" +#include "utilities/macros.hpp" + +#ifndef CC_INTERP + +#define __ _masm-> + +// Global Register Names +Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi); +Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi); + +// Platform-dependent initialization +void TemplateTable::pd_initialize() { + // No x86 specific initialization +} + +// Address Computation: local variables +static inline Address iaddress(int n) { + return Address(rlocals, Interpreter::local_offset_in_bytes(n)); +} + +static inline Address laddress(int n) { + return iaddress(n + 1); +} + +#ifndef _LP64 +static inline Address haddress(int n) { + return iaddress(n + 0); +} +#endif + +static inline Address faddress(int n) { + return iaddress(n); +} + +static inline Address daddress(int n) { + return laddress(n); +} + +static inline Address aaddress(int n) { + return iaddress(n); +} + +static inline Address iaddress(Register r) { + return Address(rlocals, r, Address::times_ptr); +} + +static inline Address laddress(Register r) { + return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1)); +} + +#ifndef _LP64 +static inline Address haddress(Register r) { + return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0)); +} +#endif + +static inline Address faddress(Register r) { + return iaddress(r); +} + +static inline Address daddress(Register r) { + return laddress(r); +} + +static inline Address aaddress(Register r) { + return iaddress(r); +} + + +// expression stack +// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store +// data beyond the rsp which is potentially unsafe in an MT environment; +// an interrupt may overwrite that data.) +static inline Address at_rsp () { + return Address(rsp, 0); +} + +// At top of Java expression stack which may be different than esp(). It +// isn't for category 1 objects. 
+static inline Address at_tos () { + return Address(rsp, Interpreter::expr_offset_in_bytes(0)); +} + +static inline Address at_tos_p1() { + return Address(rsp, Interpreter::expr_offset_in_bytes(1)); +} + +static inline Address at_tos_p2() { + return Address(rsp, Interpreter::expr_offset_in_bytes(2)); +} + +// Condition conversion +static Assembler::Condition j_not(TemplateTable::Condition cc) { + switch (cc) { + case TemplateTable::equal : return Assembler::notEqual; + case TemplateTable::not_equal : return Assembler::equal; + case TemplateTable::less : return Assembler::greaterEqual; + case TemplateTable::less_equal : return Assembler::greater; + case TemplateTable::greater : return Assembler::lessEqual; + case TemplateTable::greater_equal: return Assembler::less; + } + ShouldNotReachHere(); + return Assembler::zero; +} + + + +// Miscelaneous helper routines +// Store an oop (or NULL) at the address described by obj. +// If val == noreg this means store a NULL + + +static void do_oop_store(InterpreterMacroAssembler* _masm, + Address obj, + Register val, + BarrierSet::Name barrier, + bool precise) { + assert(val == noreg || val == rax, "parameter is just for looks"); + switch (barrier) { +#if INCLUDE_ALL_GCS + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + { + // flatten object address if needed + // We do it regardless of precise because we need the registers + if (obj.index() == noreg && obj.disp() == 0) { + if (obj.base() != rdx) { + __ movptr(rdx, obj.base()); + } + } else { + __ lea(rdx, obj); + } + + Register rtmp = LP64_ONLY(r8) NOT_LP64(rsi); + Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx); + + NOT_LP64(__ get_thread(rcx)); + NOT_LP64(__ save_bcp()); + + __ g1_write_barrier_pre(rdx /* obj */, + rbx /* pre_val */, + rthread /* thread */, + rtmp /* tmp */, + val != noreg /* tosca_live */, + false /* expand_call */); + if (val == noreg) { + __ store_heap_oop_null(Address(rdx, 0)); + } else { + // G1 barrier needs uncompressed oop for region cross check. 
+ Register new_val = val; + if (UseCompressedOops) { + new_val = rbx; + __ movptr(new_val, val); + } + __ store_heap_oop(Address(rdx, 0), val); + __ g1_write_barrier_post(rdx /* store_adr */, + new_val /* new_val */, + rthread /* thread */, + rtmp /* tmp */, + rbx /* tmp2 */); + } + NOT_LP64( __ restore_bcp()); + } + break; +#endif // INCLUDE_ALL_GCS + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + { + if (val == noreg) { + __ store_heap_oop_null(obj); + } else { + __ store_heap_oop(obj, val); + // flatten object address if needed + if (!precise || (obj.index() == noreg && obj.disp() == 0)) { + __ store_check(obj.base()); + } else { + __ lea(rdx, obj); + __ store_check(rdx); + } + } + } + break; + case BarrierSet::ModRef: + if (val == noreg) { + __ store_heap_oop_null(obj); + } else { + __ store_heap_oop(obj, val); + } + break; + default : + ShouldNotReachHere(); + + } +} + +Address TemplateTable::at_bcp(int offset) { + assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); + return Address(rbcp, offset); +} + + +void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, + Register temp_reg, bool load_bc_into_bc_reg/*=true*/, + int byte_no) { + if (!RewriteBytecodes) return; + Label L_patch_done; + + switch (bc) { + case Bytecodes::_fast_aputfield: + case Bytecodes::_fast_bputfield: + case Bytecodes::_fast_cputfield: + case Bytecodes::_fast_dputfield: + case Bytecodes::_fast_fputfield: + case Bytecodes::_fast_iputfield: + case Bytecodes::_fast_lputfield: + case Bytecodes::_fast_sputfield: + { + // We skip bytecode quickening for putfield instructions when + // the put_code written to the constant pool cache is zero. + // This is required so that every execution of this instruction + // calls out to InterpreterRuntime::resolve_get_put to do + // additional, required work. + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + assert(load_bc_into_bc_reg, "we use bc_reg as temp"); + __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1); + __ movl(bc_reg, bc); + __ cmpl(temp_reg, (int) 0); + __ jcc(Assembler::zero, L_patch_done); // don't patch + } + break; + default: + assert(byte_no == -1, "sanity"); + // the pair bytecodes have already done the load. 
+ if (load_bc_into_bc_reg) { + __ movl(bc_reg, bc); + } + } + + if (JvmtiExport::can_post_breakpoint()) { + Label L_fast_patch; + // if a breakpoint is present we can't rewrite the stream directly + __ movzbl(temp_reg, at_bcp(0)); + __ cmpl(temp_reg, Bytecodes::_breakpoint); + __ jcc(Assembler::notEqual, L_fast_patch); + __ get_method(temp_reg); + // Let breakpoint table handling rewrite to quicker bytecode + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg); +#ifndef ASSERT + __ jmpb(L_patch_done); +#else + __ jmp(L_patch_done); +#endif + __ bind(L_fast_patch); + } + +#ifdef ASSERT + Label L_okay; + __ load_unsigned_byte(temp_reg, at_bcp(0)); + __ cmpl(temp_reg, (int) Bytecodes::java_code(bc)); + __ jcc(Assembler::equal, L_okay); + __ cmpl(temp_reg, bc_reg); + __ jcc(Assembler::equal, L_okay); + __ stop("patching the wrong bytecode"); + __ bind(L_okay); +#endif + + // patch bytecode + __ movb(at_bcp(0), bc_reg); + __ bind(L_patch_done); +} +// Individual instructions + + +void TemplateTable::nop() { + transition(vtos, vtos); + // nothing to do +} + +void TemplateTable::shouldnotreachhere() { + transition(vtos, vtos); + __ stop("shouldnotreachhere bytecode"); +} + +void TemplateTable::aconst_null() { + transition(vtos, atos); + __ xorl(rax, rax); +} + +void TemplateTable::iconst(int value) { + transition(vtos, itos); + if (value == 0) { + __ xorl(rax, rax); + } else { + __ movl(rax, value); + } +} + +void TemplateTable::lconst(int value) { + transition(vtos, ltos); + if (value == 0) { + __ xorl(rax, rax); + } else { + __ movl(rax, value); + } +#ifndef _LP64 + assert(value >= 0, "check this code"); + __ xorptr(rdx, rdx); +#endif +} + + + +void TemplateTable::fconst(int value) { + transition(vtos, ftos); +#ifdef _LP64 + static float one = 1.0f, two = 2.0f; + switch (value) { + case 0: + __ xorps(xmm0, xmm0); + break; + case 1: + __ movflt(xmm0, ExternalAddress((address) &one)); + break; + case 2: + __ movflt(xmm0, ExternalAddress((address) &two)); + break; + default: + ShouldNotReachHere(); + break; + } +#else + if (value == 0) { __ fldz(); + } else if (value == 1) { __ fld1(); + } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here + } else { ShouldNotReachHere(); + } +#endif +} + +void TemplateTable::dconst(int value) { + transition(vtos, dtos); +#ifdef _LP64 + static double one = 1.0; + switch (value) { + case 0: + __ xorpd(xmm0, xmm0); + break; + case 1: + __ movdbl(xmm0, ExternalAddress((address) &one)); + break; + default: + ShouldNotReachHere(); + break; + } + +#else + if (value == 0) { __ fldz(); + } else if (value == 1) { __ fld1(); + } else { ShouldNotReachHere(); + } +#endif +} + +void TemplateTable::bipush() { + transition(vtos, itos); + __ load_signed_byte(rax, at_bcp(1)); +} + +void TemplateTable::sipush() { + transition(vtos, itos); + __ load_unsigned_short(rax, at_bcp(1)); + __ bswapl(rax); + __ sarl(rax, 16); +} + +void TemplateTable::ldc(bool wide) { + transition(vtos, vtos); + Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1); + Label call_ldc, notFloat, notClass, Done; + + if (wide) { + __ get_unsigned_2_byte_index_at_bcp(rbx, 1); + } else { + __ load_unsigned_byte(rbx, at_bcp(1)); + } + + __ get_cpool_and_tags(rcx, rax); + const int base_offset = ConstantPool::header_size() * wordSize; + const int tags_offset = Array::base_offset_in_bytes(); + + // get type + __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset)); + + // unresolved class - get the resolved 
class + __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); + __ jccb(Assembler::equal, call_ldc); + + // unresolved class in error state - call into runtime to throw the error + // from the first resolution attempt + __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); + __ jccb(Assembler::equal, call_ldc); + + // resolved class - need to call vm to get java mirror of the class + __ cmpl(rdx, JVM_CONSTANT_Class); + __ jcc(Assembler::notEqual, notClass); + + __ bind(call_ldc); + + __ movl(rarg, wide); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg); + + __ push(atos); + __ jmp(Done); + + __ bind(notClass); + __ cmpl(rdx, JVM_CONSTANT_Float); + __ jccb(Assembler::notEqual, notFloat); + + // ftos + LP64_ONLY(__ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset))); + NOT_LP64(__ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset))); + __ push(ftos); + __ jmp(Done); + + __ bind(notFloat); +#ifdef ASSERT + { + Label L; + __ cmpl(rdx, JVM_CONSTANT_Integer); + __ jcc(Assembler::equal, L); + // String and Object are rewritten to fast_aldc + __ stop("unexpected tag type in ldc"); + __ bind(L); + } +#endif + // itos JVM_CONSTANT_Integer only + __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset)); + __ push(itos); + __ bind(Done); +} + +// Fast path for caching oop constants. +void TemplateTable::fast_aldc(bool wide) { + transition(vtos, atos); + + Register result = rax; + Register tmp = rdx; + int index_size = wide ? sizeof(u2) : sizeof(u1); + + Label resolved; + + // We are resolved if the resolved reference cache entry contains a + // non-null object (String, MethodType, etc.) + assert_different_registers(result, tmp); + __ get_cache_index_at_bcp(tmp, 1, index_size); + __ load_resolved_reference_at_index(result, tmp); + __ testl(result, result); + __ jcc(Assembler::notZero, resolved); + + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); + + // first time invocation - must resolve first + __ movl(tmp, (int)bytecode()); + __ call_VM(result, entry, tmp); + + __ bind(resolved); + + if (VerifyOops) { + __ verify_oop(result); + } +} + +void TemplateTable::ldc2_w() { + transition(vtos, vtos); + Label Long, Done; + __ get_unsigned_2_byte_index_at_bcp(rbx, 1); + + __ get_cpool_and_tags(rcx, rax); + const int base_offset = ConstantPool::header_size() * wordSize; + const int tags_offset = Array::base_offset_in_bytes(); + + // get type + __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), + JVM_CONSTANT_Double); + __ jccb(Assembler::notEqual, Long); + + // dtos + LP64_ONLY(__ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset))); + NOT_LP64(__ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset))); + __ push(dtos); + + __ jmpb(Done); + __ bind(Long); + + // ltos + __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize)); + NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize))); + __ push(ltos); + + __ bind(Done); +} + +void TemplateTable::locals_index(Register reg, int offset) { + __ load_unsigned_byte(reg, at_bcp(offset)); + __ negptr(reg); +} + +void TemplateTable::iload() { + transition(vtos, itos); + if (RewriteFrequentPairs) { + Label rewrite, done; + const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + LP64_ONLY(assert(rbx != bc, "register damaged")); + + // get next byte + __ load_unsigned_byte(rbx, + at_bcp(Bytecodes::length_for(Bytecodes::_iload))); + // if _iload, wait to rewrite to iload2. 
We only want to rewrite the + // last two iloads in a pair. Comparing against fast_iload means that + // the next bytecode is neither an iload or a caload, and therefore + // an iload pair. + __ cmpl(rbx, Bytecodes::_iload); + __ jcc(Assembler::equal, done); + + __ cmpl(rbx, Bytecodes::_fast_iload); + __ movl(bc, Bytecodes::_fast_iload2); + + __ jccb(Assembler::equal, rewrite); + + // if _caload, rewrite to fast_icaload + __ cmpl(rbx, Bytecodes::_caload); + __ movl(bc, Bytecodes::_fast_icaload); + __ jccb(Assembler::equal, rewrite); + + // rewrite so iload doesn't check again. + __ movl(bc, Bytecodes::_fast_iload); + + // rewrite + // bc: fast bytecode + __ bind(rewrite); + patch_bytecode(Bytecodes::_iload, bc, rbx, false); + __ bind(done); + } + + // Get the local value into tos + locals_index(rbx); + __ movl(rax, iaddress(rbx)); +} + +void TemplateTable::fast_iload2() { + transition(vtos, itos); + locals_index(rbx); + __ movl(rax, iaddress(rbx)); + __ push(itos); + locals_index(rbx, 3); + __ movl(rax, iaddress(rbx)); +} + +void TemplateTable::fast_iload() { + transition(vtos, itos); + locals_index(rbx); + __ movl(rax, iaddress(rbx)); +} + +void TemplateTable::lload() { + transition(vtos, ltos); + locals_index(rbx); + __ movptr(rax, laddress(rbx)); + NOT_LP64(__ movl(rdx, haddress(rbx))); +} + +void TemplateTable::fload() { + transition(vtos, ftos); + locals_index(rbx); + LP64_ONLY(__ movflt(xmm0, faddress(rbx))); + NOT_LP64(__ fld_s(faddress(rbx))); +} + +void TemplateTable::dload() { + transition(vtos, dtos); + locals_index(rbx); + LP64_ONLY(__ movdbl(xmm0, daddress(rbx))); + NOT_LP64(__ fld_d(daddress(rbx))); +} + +void TemplateTable::aload() { + transition(vtos, atos); + locals_index(rbx); + __ movptr(rax, aaddress(rbx)); +} + +void TemplateTable::locals_index_wide(Register reg) { + __ load_unsigned_short(reg, at_bcp(2)); + __ bswapl(reg); + __ shrl(reg, 16); + __ negptr(reg); +} + +void TemplateTable::wide_iload() { + transition(vtos, itos); + locals_index_wide(rbx); + __ movl(rax, iaddress(rbx)); +} + +void TemplateTable::wide_lload() { + transition(vtos, ltos); + locals_index_wide(rbx); + __ movptr(rax, laddress(rbx)); + NOT_LP64(__ movl(rdx, haddress(rbx))); +} + +void TemplateTable::wide_fload() { + transition(vtos, ftos); + locals_index_wide(rbx); + LP64_ONLY(__ movflt(xmm0, faddress(rbx))); + NOT_LP64(__ fld_s(faddress(rbx))); +} + +void TemplateTable::wide_dload() { + transition(vtos, dtos); + locals_index_wide(rbx); + LP64_ONLY(__ movdbl(xmm0, daddress(rbx))); + NOT_LP64(__ fld_d(daddress(rbx))); +} + +void TemplateTable::wide_aload() { + transition(vtos, atos); + locals_index_wide(rbx); + __ movptr(rax, aaddress(rbx)); +} + +void TemplateTable::index_check(Register array, Register index) { + // Pop ptr into array + __ pop_ptr(array); + index_check_without_pop(array, index); +} + +void TemplateTable::index_check_without_pop(Register array, Register index) { + // destroys rbx + // check array + __ null_check(array, arrayOopDesc::length_offset_in_bytes()); + // sign extend index for use by indexed load + __ movl2ptr(index, index); + // check index + __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); + if (index != rbx) { + // ??? 
convention: move aberrant index into rbx for exception message + assert(rbx != array, "different registers"); + __ movl(rbx, index); + } + __ jump_cc(Assembler::aboveEqual, + ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); +} + + +void TemplateTable::iaload() { + transition(itos, itos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + __ movl(rax, Address(rdx, rax, + Address::times_4, + arrayOopDesc::base_offset_in_bytes(T_INT))); +} + +void TemplateTable::laload() { + transition(itos, ltos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + NOT_LP64(__ mov(rbx, rax)); + // rbx,: index + __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize)); + NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize))); +} + + + +void TemplateTable::faload() { + transition(itos, ftos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + LP64_ONLY(__ movflt(xmm0, Address(rdx, rax, + Address::times_4, + arrayOopDesc::base_offset_in_bytes(T_FLOAT)))); + NOT_LP64(__ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)))); +} + +void TemplateTable::daload() { + transition(itos, dtos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + LP64_ONLY(__ movdbl(xmm0, Address(rdx, rax, + Address::times_8, + arrayOopDesc::base_offset_in_bytes(T_DOUBLE)))); + NOT_LP64(__ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)))); +} + +void TemplateTable::aaload() { + transition(itos, atos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + __ load_heap_oop(rax, Address(rdx, rax, + UseCompressedOops ? 
Address::times_4 : Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT))); +} + +void TemplateTable::baload() { + transition(itos, itos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE))); +} + +void TemplateTable::caload() { + transition(itos, itos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); +} + +// iload followed by caload frequent pair +void TemplateTable::fast_icaload() { + transition(vtos, itos); + // load index out of locals + locals_index(rbx); + __ movl(rax, iaddress(rbx)); + + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + __ load_unsigned_short(rax, + Address(rdx, rax, + Address::times_2, + arrayOopDesc::base_offset_in_bytes(T_CHAR))); +} + + +void TemplateTable::saload() { + transition(itos, itos); + // rax: index + // rdx: array + index_check(rdx, rax); // kills rbx + __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT))); +} + +void TemplateTable::iload(int n) { + transition(vtos, itos); + __ movl(rax, iaddress(n)); +} + +void TemplateTable::lload(int n) { + transition(vtos, ltos); + __ movptr(rax, laddress(n)); + NOT_LP64(__ movptr(rdx, haddress(n))); +} + +void TemplateTable::fload(int n) { + transition(vtos, ftos); + LP64_ONLY(__ movflt(xmm0, faddress(n))); + NOT_LP64(__ fld_s(faddress(n))); +} + +void TemplateTable::dload(int n) { + transition(vtos, dtos); + LP64_ONLY(__ movdbl(xmm0, daddress(n))); + NOT_LP64(__ fld_d(daddress(n))); +} + +void TemplateTable::aload(int n) { + transition(vtos, atos); + __ movptr(rax, aaddress(n)); +} + +void TemplateTable::aload_0() { + transition(vtos, atos); + // According to bytecode histograms, the pairs: + // + // _aload_0, _fast_igetfield + // _aload_0, _fast_agetfield + // _aload_0, _fast_fgetfield + // + // occur frequently. If RewriteFrequentPairs is set, the (slow) + // _aload_0 bytecode checks if the next bytecode is either + // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then + // rewrites the current bytecode into a pair bytecode; otherwise it + // rewrites the current bytecode into _fast_aload_0 that doesn't do + // the pair check anymore. + // + // Note: If the next bytecode is _getfield, the rewrite must be + // delayed, otherwise we may miss an opportunity for a pair. 
+ // + // Also rewrite frequent pairs + // aload_0, aload_1 + // aload_0, iload_1 + // These bytecodes with a small amount of code are most profitable + // to rewrite + if (RewriteFrequentPairs) { + Label rewrite, done; + + const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + LP64_ONLY(assert(rbx != bc, "register damaged")); + + // get next byte + __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); + + // do actual aload_0 + aload(0); + + // if _getfield then wait with rewrite + __ cmpl(rbx, Bytecodes::_getfield); + __ jcc(Assembler::equal, done); + + // if _igetfield then reqrite to _fast_iaccess_0 + assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ cmpl(rbx, Bytecodes::_fast_igetfield); + __ movl(bc, Bytecodes::_fast_iaccess_0); + __ jccb(Assembler::equal, rewrite); + + // if _agetfield then reqrite to _fast_aaccess_0 + assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ cmpl(rbx, Bytecodes::_fast_agetfield); + __ movl(bc, Bytecodes::_fast_aaccess_0); + __ jccb(Assembler::equal, rewrite); + + // if _fgetfield then reqrite to _fast_faccess_0 + assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ cmpl(rbx, Bytecodes::_fast_fgetfield); + __ movl(bc, Bytecodes::_fast_faccess_0); + __ jccb(Assembler::equal, rewrite); + + // else rewrite to _fast_aload0 + assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ movl(bc, Bytecodes::_fast_aload_0); + + // rewrite + // bc: fast bytecode + __ bind(rewrite); + patch_bytecode(Bytecodes::_aload_0, bc, rbx, false); + + __ bind(done); + } else { + aload(0); + } +} + +void TemplateTable::istore() { + transition(itos, vtos); + locals_index(rbx); + __ movl(iaddress(rbx), rax); +} + + +void TemplateTable::lstore() { + transition(ltos, vtos); + locals_index(rbx); + __ movptr(laddress(rbx), rax); + NOT_LP64(__ movptr(haddress(rbx), rdx)); +} + +void TemplateTable::fstore() { + transition(ftos, vtos); + locals_index(rbx); + LP64_ONLY(__ movflt(faddress(rbx), xmm0)); + NOT_LP64(__ fstp_s(faddress(rbx))); +} + +void TemplateTable::dstore() { + transition(dtos, vtos); + locals_index(rbx); + LP64_ONLY(__ movdbl(daddress(rbx), xmm0)); + NOT_LP64(__ fstp_d(daddress(rbx))); +} + +void TemplateTable::astore() { + transition(vtos, vtos); + __ pop_ptr(rax); + locals_index(rbx); + __ movptr(aaddress(rbx), rax); +} + +void TemplateTable::wide_istore() { + transition(vtos, vtos); + __ pop_i(); + locals_index_wide(rbx); + __ movl(iaddress(rbx), rax); +} + +void TemplateTable::wide_lstore() { + transition(vtos, vtos); + NOT_LP64(__ pop_l(rax, rdx)); + LP64_ONLY(__ pop_l()); + locals_index_wide(rbx); + __ movptr(laddress(rbx), rax); + NOT_LP64(__ movl(haddress(rbx), rdx)); +} + +void TemplateTable::wide_fstore() { +#ifdef _LP64 + transition(vtos, vtos); + __ pop_f(); + locals_index_wide(rbx); + __ movflt(faddress(rbx), xmm0); +#else + wide_istore(); +#endif +} + +void TemplateTable::wide_dstore() { +#ifdef _LP64 + transition(vtos, vtos); + __ pop_d(); + locals_index_wide(rbx); + __ movdbl(daddress(rbx), xmm0); +#else + wide_lstore(); +#endif +} + +void TemplateTable::wide_astore() { + transition(vtos, vtos); + __ pop_ptr(rax); + locals_index_wide(rbx); + __ movptr(aaddress(rbx), rax); +} + +void TemplateTable::iastore() { + transition(itos, vtos); + __ pop_i(rbx); + // rax: value + // rbx: index + // rdx: array 
+ index_check(rdx, rbx); // prefer index in rbx + __ movl(Address(rdx, rbx, + Address::times_4, + arrayOopDesc::base_offset_in_bytes(T_INT)), + rax); +} + +void TemplateTable::lastore() { + transition(ltos, vtos); + __ pop_i(rbx); + // rax,: low(value) + // rcx: array + // rdx: high(value) + index_check(rcx, rbx); // prefer index in rbx, + // rbx,: index + __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax); + NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx)); +} + + +void TemplateTable::fastore() { + transition(ftos, vtos); + __ pop_i(rbx); + // xmm0: value + // rbx: index + // rdx: array + index_check(rdx, rbx); // prefer index in rbx + LP64_ONLY(__ movflt(Address(rdx, rbx, + Address::times_4, + arrayOopDesc::base_offset_in_bytes(T_FLOAT)), + xmm0)); + NOT_LP64(__ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)))); +} + +void TemplateTable::dastore() { + transition(dtos, vtos); + __ pop_i(rbx); + // xmm0: value + // rbx: index + // rdx: array + index_check(rdx, rbx); // prefer index in rbx + LP64_ONLY(__ movdbl(Address(rdx, rbx, + Address::times_8, + arrayOopDesc::base_offset_in_bytes(T_DOUBLE)), + xmm0)); + NOT_LP64(__ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)))); +} + +void TemplateTable::aastore() { + Label is_null, ok_is_subtype, done; + transition(vtos, vtos); + // stack: ..., array, index, value + __ movptr(rax, at_tos()); // value + __ movl(rcx, at_tos_p1()); // index + __ movptr(rdx, at_tos_p2()); // array + + Address element_address(rdx, rcx, + UseCompressedOops? Address::times_4 : Address::times_ptr, + arrayOopDesc::base_offset_in_bytes(T_OBJECT)); + + index_check_without_pop(rdx, rcx); // kills rbx + __ testptr(rax, rax); + __ jcc(Assembler::zero, is_null); + + // Move subklass into rbx + __ load_klass(rbx, rax); + // Move superklass into rax + __ load_klass(rax, rdx); + __ movptr(rax, Address(rax, + ObjArrayKlass::element_klass_offset())); + // Compress array + index*oopSize + 12 into a single register. Frees rcx. + __ lea(rdx, element_address); + + // Generate subtype check. Blows rcx, rdi + // Superklass in rax. Subklass in rbx. + __ gen_subtype_check(rbx, ok_is_subtype); + + // Come here on failure + // object is at TOS + __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); + + // Come here on success + __ bind(ok_is_subtype); + + // Get the value we will store + __ movptr(rax, at_tos()); + // Now store using the appropriate barrier + do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true); + __ jmp(done); + + // Have a NULL in rax, rdx=array, ecx=index. 
Store NULL at ary[idx] + __ bind(is_null); + __ profile_null_seen(rbx); + + // Store a NULL + do_oop_store(_masm, element_address, noreg, _bs->kind(), true); + + // Pop stack arguments + __ bind(done); + __ addptr(rsp, 3 * Interpreter::stackElementSize); +} + +void TemplateTable::bastore() { + transition(itos, vtos); + __ pop_i(rbx); + // rax: value + // rbx: index + // rdx: array + index_check(rdx, rbx); // prefer index in rbx + __ movb(Address(rdx, rbx, + Address::times_1, + arrayOopDesc::base_offset_in_bytes(T_BYTE)), + rax); +} + +void TemplateTable::castore() { + transition(itos, vtos); + __ pop_i(rbx); + // rax: value + // rbx: index + // rdx: array + index_check(rdx, rbx); // prefer index in rbx + __ movw(Address(rdx, rbx, + Address::times_2, + arrayOopDesc::base_offset_in_bytes(T_CHAR)), + rax); +} + + +void TemplateTable::sastore() { + castore(); +} + +void TemplateTable::istore(int n) { + transition(itos, vtos); + __ movl(iaddress(n), rax); +} + +void TemplateTable::lstore(int n) { + transition(ltos, vtos); + __ movptr(laddress(n), rax); + NOT_LP64(__ movptr(haddress(n), rdx)); +} + +void TemplateTable::fstore(int n) { + transition(ftos, vtos); + LP64_ONLY(__ movflt(faddress(n), xmm0)); + NOT_LP64(__ fstp_s(faddress(n))); +} + +void TemplateTable::dstore(int n) { + transition(dtos, vtos); + LP64_ONLY(__ movdbl(daddress(n), xmm0)); + NOT_LP64(__ fstp_d(daddress(n))); +} + + +void TemplateTable::astore(int n) { + transition(vtos, vtos); + __ pop_ptr(rax); + __ movptr(aaddress(n), rax); +} + +void TemplateTable::pop() { + transition(vtos, vtos); + __ addptr(rsp, Interpreter::stackElementSize); +} + +void TemplateTable::pop2() { + transition(vtos, vtos); + __ addptr(rsp, 2 * Interpreter::stackElementSize); +} + + +void TemplateTable::dup() { + transition(vtos, vtos); + __ load_ptr(0, rax); + __ push_ptr(rax); + // stack: ..., a, a +} + +void TemplateTable::dup_x1() { + transition(vtos, vtos); + // stack: ..., a, b + __ load_ptr( 0, rax); // load b + __ load_ptr( 1, rcx); // load a + __ store_ptr(1, rax); // store b + __ store_ptr(0, rcx); // store a + __ push_ptr(rax); // push b + // stack: ..., b, a, b +} + +void TemplateTable::dup_x2() { + transition(vtos, vtos); + // stack: ..., a, b, c + __ load_ptr( 0, rax); // load c + __ load_ptr( 2, rcx); // load a + __ store_ptr(2, rax); // store c in a + __ push_ptr(rax); // push c + // stack: ..., c, b, c, c + __ load_ptr( 2, rax); // load b + __ store_ptr(2, rcx); // store a in b + // stack: ..., c, a, c, c + __ store_ptr(1, rax); // store b in c + // stack: ..., c, a, b, c +} + +void TemplateTable::dup2() { + transition(vtos, vtos); + // stack: ..., a, b + __ load_ptr(1, rax); // load a + __ push_ptr(rax); // push a + __ load_ptr(1, rax); // load b + __ push_ptr(rax); // push b + // stack: ..., a, b, a, b +} + + +void TemplateTable::dup2_x1() { + transition(vtos, vtos); + // stack: ..., a, b, c + __ load_ptr( 0, rcx); // load c + __ load_ptr( 1, rax); // load b + __ push_ptr(rax); // push b + __ push_ptr(rcx); // push c + // stack: ..., a, b, c, b, c + __ store_ptr(3, rcx); // store c in b + // stack: ..., a, c, c, b, c + __ load_ptr( 4, rcx); // load a + __ store_ptr(2, rcx); // store a in 2nd c + // stack: ..., a, c, a, b, c + __ store_ptr(4, rax); // store b in a + // stack: ..., b, c, a, b, c +} + +void TemplateTable::dup2_x2() { + transition(vtos, vtos); + // stack: ..., a, b, c, d + __ load_ptr( 0, rcx); // load d + __ load_ptr( 1, rax); // load c + __ push_ptr(rax); // push c + __ push_ptr(rcx); // push d + // stack: ..., a, b, c, 
d, c, d + __ load_ptr( 4, rax); // load b + __ store_ptr(2, rax); // store b in d + __ store_ptr(4, rcx); // store d in b + // stack: ..., a, d, c, b, c, d + __ load_ptr( 5, rcx); // load a + __ load_ptr( 3, rax); // load c + __ store_ptr(3, rcx); // store a in c + __ store_ptr(5, rax); // store c in a + // stack: ..., c, d, a, b, c, d +} + +void TemplateTable::swap() { + transition(vtos, vtos); + // stack: ..., a, b + __ load_ptr( 1, rcx); // load a + __ load_ptr( 0, rax); // load b + __ store_ptr(0, rcx); // store a in b + __ store_ptr(1, rax); // store b in a + // stack: ..., b, a +} + +void TemplateTable::iop2(Operation op) { + transition(itos, itos); + switch (op) { + case add : __ pop_i(rdx); __ addl (rax, rdx); break; + case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; + case mul : __ pop_i(rdx); __ imull(rax, rdx); break; + case _and : __ pop_i(rdx); __ andl (rax, rdx); break; + case _or : __ pop_i(rdx); __ orl (rax, rdx); break; + case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break; + case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break; + case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break; + case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break; + default : ShouldNotReachHere(); + } +} + +void TemplateTable::lop2(Operation op) { + transition(ltos, ltos); +#ifdef _LP64 + switch (op) { + case add : __ pop_l(rdx); __ addptr(rax, rdx); break; + case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break; + case _and : __ pop_l(rdx); __ andptr(rax, rdx); break; + case _or : __ pop_l(rdx); __ orptr (rax, rdx); break; + case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break; + default : ShouldNotReachHere(); + } +#else + __ pop_l(rbx, rcx); + switch (op) { + case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break; + case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx); + __ mov (rax, rbx); __ mov (rdx, rcx); break; + case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break; + case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break; + case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break; + default : ShouldNotReachHere(); + } +#endif +} + +void TemplateTable::idiv() { + transition(itos, itos); + __ movl(rcx, rax); + __ pop_i(rax); + // Note: could xor rax and ecx and compare with (-1 ^ min_int). If + // they are not equal, one could do a normal division (no correction + // needed), which may speed up this implementation for the common case. + // (see also JVM spec., p.243 & p.271) + __ corrected_idivl(rcx); +} + +void TemplateTable::irem() { + transition(itos, itos); + __ movl(rcx, rax); + __ pop_i(rax); + // Note: could xor rax and ecx and compare with (-1 ^ min_int). If + // they are not equal, one could do a normal division (no correction + // needed), which may speed up this implementation for the common case. + // (see also JVM spec., p.243 & p.271) + __ corrected_idivl(rcx); + __ movl(rax, rdx); +} + +void TemplateTable::lmul() { + transition(ltos, ltos); +#ifdef _LP64 + __ pop_l(rdx); + __ imulq(rax, rdx); +#else + __ pop_l(rbx, rcx); + __ push(rcx); __ push(rbx); + __ push(rdx); __ push(rax); + __ lmul(2 * wordSize, 0); + __ addptr(rsp, 4 * wordSize); // take off temporaries +#endif +} + +void TemplateTable::ldiv() { + transition(ltos, ltos); +#ifdef _LP64 + __ mov(rcx, rax); + __ pop_l(rax); + // generate explicit div0 check + __ testq(rcx, rcx); + __ jump_cc(Assembler::zero, + ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); + // Note: could xor rax and rcx and compare with (-1 ^ min_int). 
If + // they are not equal, one could do a normal division (no correction + // needed), which may speed up this implementation for the common case. + // (see also JVM spec., p.243 & p.271) + __ corrected_idivq(rcx); // kills rbx +#else + __ pop_l(rbx, rcx); + __ push(rcx); __ push(rbx); + __ push(rdx); __ push(rax); + // check if y = 0 + __ orl(rax, rdx); + __ jump_cc(Assembler::zero, + ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv)); + __ addptr(rsp, 4 * wordSize); // take off temporaries +#endif +} + +void TemplateTable::lrem() { + transition(ltos, ltos); +#ifdef _LP64 + __ mov(rcx, rax); + __ pop_l(rax); + __ testq(rcx, rcx); + __ jump_cc(Assembler::zero, + ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); + // Note: could xor rax and rcx and compare with (-1 ^ min_int). If + // they are not equal, one could do a normal division (no correction + // needed), which may speed up this implementation for the common case. + // (see also JVM spec., p.243 & p.271) + __ corrected_idivq(rcx); // kills rbx + __ mov(rax, rdx); +#else + __ pop_l(rbx, rcx); + __ push(rcx); __ push(rbx); + __ push(rdx); __ push(rax); + // check if y = 0 + __ orl(rax, rdx); + __ jump_cc(Assembler::zero, + ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem)); + __ addptr(rsp, 4 * wordSize); +#endif +} + +void TemplateTable::lshl() { + transition(itos, ltos); + __ movl(rcx, rax); // get shift count + #ifdef _LP64 + __ pop_l(rax); // get shift value + __ shlq(rax); +#else + __ pop_l(rax, rdx); // get shift value + __ lshl(rdx, rax); +#endif +} + +void TemplateTable::lshr() { +#ifdef _LP64 + transition(itos, ltos); + __ movl(rcx, rax); // get shift count + __ pop_l(rax); // get shift value + __ sarq(rax); +#else + transition(itos, ltos); + __ mov(rcx, rax); // get shift count + __ pop_l(rax, rdx); // get shift value + __ lshr(rdx, rax, true); +#endif +} + +void TemplateTable::lushr() { + transition(itos, ltos); +#ifdef _LP64 + __ movl(rcx, rax); // get shift count + __ pop_l(rax); // get shift value + __ shrq(rax); +#else + __ mov(rcx, rax); // get shift count + __ pop_l(rax, rdx); // get shift value + __ lshr(rdx, rax); +#endif +} + +void TemplateTable::fop2(Operation op) { + transition(ftos, ftos); +#ifdef _LP64 + switch (op) { + case add: + __ addss(xmm0, at_rsp()); + __ addptr(rsp, Interpreter::stackElementSize); + break; + case sub: + __ movflt(xmm1, xmm0); + __ pop_f(xmm0); + __ subss(xmm0, xmm1); + break; + case mul: + __ mulss(xmm0, at_rsp()); + __ addptr(rsp, Interpreter::stackElementSize); + break; + case div: + __ movflt(xmm1, xmm0); + __ pop_f(xmm0); + __ divss(xmm0, xmm1); + break; + case rem: + __ movflt(xmm1, xmm0); + __ pop_f(xmm0); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2); + break; + default: + ShouldNotReachHere(); + break; + } +#else + switch (op) { + case add: __ fadd_s (at_rsp()); break; + case sub: __ fsubr_s(at_rsp()); break; + case mul: __ fmul_s (at_rsp()); break; + case div: __ fdivr_s(at_rsp()); break; + case rem: __ fld_s (at_rsp()); __ fremr(rax); break; + default : ShouldNotReachHere(); + } + __ f2ieee(); + __ pop(rax); // pop float thing off +#endif +} + +void TemplateTable::dop2(Operation op) { + transition(dtos, dtos); +#ifdef _LP64 + switch (op) { + case add: + __ addsd(xmm0, at_rsp()); + __ addptr(rsp, 2 * Interpreter::stackElementSize); + break; + case sub: + __ movdbl(xmm1, xmm0); + __ 
pop_d(xmm0); + __ subsd(xmm0, xmm1); + break; + case mul: + __ mulsd(xmm0, at_rsp()); + __ addptr(rsp, 2 * Interpreter::stackElementSize); + break; + case div: + __ movdbl(xmm1, xmm0); + __ pop_d(xmm0); + __ divsd(xmm0, xmm1); + break; + case rem: + __ movdbl(xmm1, xmm0); + __ pop_d(xmm0); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2); + break; + default: + ShouldNotReachHere(); + break; + } +#else + switch (op) { + case add: __ fadd_d (at_rsp()); break; + case sub: __ fsubr_d(at_rsp()); break; + case mul: { + Label L_strict; + Label L_join; + const Address access_flags (rcx, Method::access_flags_offset()); + __ get_method(rcx); + __ movl(rcx, access_flags); + __ testl(rcx, JVM_ACC_STRICT); + __ jccb(Assembler::notZero, L_strict); + __ fmul_d (at_rsp()); + __ jmpb(L_join); + __ bind(L_strict); + __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); + __ fmulp(); + __ fmul_d (at_rsp()); + __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); + __ fmulp(); + __ bind(L_join); + break; + } + case div: { + Label L_strict; + Label L_join; + const Address access_flags (rcx, Method::access_flags_offset()); + __ get_method(rcx); + __ movl(rcx, access_flags); + __ testl(rcx, JVM_ACC_STRICT); + __ jccb(Assembler::notZero, L_strict); + __ fdivr_d(at_rsp()); + __ jmp(L_join); + __ bind(L_strict); + __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); + __ fmul_d (at_rsp()); + __ fdivrp(); + __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); + __ fmulp(); + __ bind(L_join); + break; + } + case rem: __ fld_d (at_rsp()); __ fremr(rax); break; + default : ShouldNotReachHere(); + } + __ d2ieee(); + // Pop double precision number from rsp. + __ pop(rax); + __ pop(rdx); +#endif +} + +void TemplateTable::ineg() { + transition(itos, itos); + __ negl(rax); +} + +void TemplateTable::lneg() { + transition(ltos, ltos); + LP64_ONLY(__ negq(rax)); + NOT_LP64(__ lneg(rdx, rax)); +} + +#ifdef _LP64 +// Note: 'double' and 'long long' have 32-bits alignment on x86. +static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { + // Use the expression (adr)&(~0xF) to provide 128-bits aligned address + // of 128-bits operands for SSE instructions. + jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF))); + // Store the value to a 128-bits operand. + operand[0] = lo; + operand[1] = hi; + return operand; +} + +// Buffer for 128-bits masks used by SSE instructions. 
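+// (The pools below are deliberately over-sized: double_quadword() masks the
+// address of element [1] down to a 16-byte boundary, so reserving 2*2 jlongs
+// guarantees that an aligned 16-byte slot lies entirely inside each buffer.)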
+static jlong float_signflip_pool[2*2]; +static jlong double_signflip_pool[2*2]; +#endif + +void TemplateTable::fneg() { + transition(ftos, ftos); +#ifdef _LP64 + static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); + __ xorps(xmm0, ExternalAddress((address) float_signflip)); +#else + __ fchs(); +#endif +} + +void TemplateTable::dneg() { + transition(dtos, dtos); +#ifdef _LP64 + static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); + __ xorpd(xmm0, ExternalAddress((address) double_signflip)); +#else + __ fchs(); +#endif +} + +void TemplateTable::iinc() { + transition(vtos, vtos); + __ load_signed_byte(rdx, at_bcp(2)); // get constant + locals_index(rbx); + __ addl(iaddress(rbx), rdx); +} + +void TemplateTable::wide_iinc() { + transition(vtos, vtos); + __ movl(rdx, at_bcp(4)); // get constant + locals_index_wide(rbx); + __ bswapl(rdx); // swap bytes & sign-extend constant + __ sarl(rdx, 16); + __ addl(iaddress(rbx), rdx); + // Note: should probably use only one movl to get both + // the index and the constant -> fix this +} + +void TemplateTable::convert() { +#ifdef _LP64 + // Checking +#ifdef ASSERT + { + TosState tos_in = ilgl; + TosState tos_out = ilgl; + switch (bytecode()) { + case Bytecodes::_i2l: // fall through + case Bytecodes::_i2f: // fall through + case Bytecodes::_i2d: // fall through + case Bytecodes::_i2b: // fall through + case Bytecodes::_i2c: // fall through + case Bytecodes::_i2s: tos_in = itos; break; + case Bytecodes::_l2i: // fall through + case Bytecodes::_l2f: // fall through + case Bytecodes::_l2d: tos_in = ltos; break; + case Bytecodes::_f2i: // fall through + case Bytecodes::_f2l: // fall through + case Bytecodes::_f2d: tos_in = ftos; break; + case Bytecodes::_d2i: // fall through + case Bytecodes::_d2l: // fall through + case Bytecodes::_d2f: tos_in = dtos; break; + default : ShouldNotReachHere(); + } + switch (bytecode()) { + case Bytecodes::_l2i: // fall through + case Bytecodes::_f2i: // fall through + case Bytecodes::_d2i: // fall through + case Bytecodes::_i2b: // fall through + case Bytecodes::_i2c: // fall through + case Bytecodes::_i2s: tos_out = itos; break; + case Bytecodes::_i2l: // fall through + case Bytecodes::_f2l: // fall through + case Bytecodes::_d2l: tos_out = ltos; break; + case Bytecodes::_i2f: // fall through + case Bytecodes::_l2f: // fall through + case Bytecodes::_d2f: tos_out = ftos; break; + case Bytecodes::_i2d: // fall through + case Bytecodes::_l2d: // fall through + case Bytecodes::_f2d: tos_out = dtos; break; + default : ShouldNotReachHere(); + } + transition(tos_in, tos_out); + } +#endif // ASSERT + + static const int64_t is_nan = 0x8000000000000000L; + + // Conversion + switch (bytecode()) { + case Bytecodes::_i2l: + __ movslq(rax, rax); + break; + case Bytecodes::_i2f: + __ cvtsi2ssl(xmm0, rax); + break; + case Bytecodes::_i2d: + __ cvtsi2sdl(xmm0, rax); + break; + case Bytecodes::_i2b: + __ movsbl(rax, rax); + break; + case Bytecodes::_i2c: + __ movzwl(rax, rax); + break; + case Bytecodes::_i2s: + __ movswl(rax, rax); + break; + case Bytecodes::_l2i: + __ movl(rax, rax); + break; + case Bytecodes::_l2f: + __ cvtsi2ssq(xmm0, rax); + break; + case Bytecodes::_l2d: + __ cvtsi2sdq(xmm0, rax); + break; + case Bytecodes::_f2i: + { + Label L; + __ cvttss2sil(rax, xmm0); + __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? 
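+    // (cvttss2si produces the "integer indefinite" value 0x80000000 both for
+    // NaN and for out-of-range results, so only those cases reach the runtime
+    // call below, which presumably applies the Java rules: NaN -> 0 and
+    // saturation to Integer.MIN_VALUE/MAX_VALUE.)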
+ __ jcc(Assembler::notEqual, L); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); + __ bind(L); + } + break; + case Bytecodes::_f2l: + { + Label L; + __ cvttss2siq(rax, xmm0); + // NaN or overflow/underflow? + __ cmp64(rax, ExternalAddress((address) &is_nan)); + __ jcc(Assembler::notEqual, L); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); + __ bind(L); + } + break; + case Bytecodes::_f2d: + __ cvtss2sd(xmm0, xmm0); + break; + case Bytecodes::_d2i: + { + Label L; + __ cvttsd2sil(rax, xmm0); + __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? + __ jcc(Assembler::notEqual, L); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); + __ bind(L); + } + break; + case Bytecodes::_d2l: + { + Label L; + __ cvttsd2siq(rax, xmm0); + // NaN or overflow/underflow? + __ cmp64(rax, ExternalAddress((address) &is_nan)); + __ jcc(Assembler::notEqual, L); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); + __ bind(L); + } + break; + case Bytecodes::_d2f: + __ cvtsd2ss(xmm0, xmm0); + break; + default: + ShouldNotReachHere(); + } +#else + // Checking +#ifdef ASSERT + { TosState tos_in = ilgl; + TosState tos_out = ilgl; + switch (bytecode()) { + case Bytecodes::_i2l: // fall through + case Bytecodes::_i2f: // fall through + case Bytecodes::_i2d: // fall through + case Bytecodes::_i2b: // fall through + case Bytecodes::_i2c: // fall through + case Bytecodes::_i2s: tos_in = itos; break; + case Bytecodes::_l2i: // fall through + case Bytecodes::_l2f: // fall through + case Bytecodes::_l2d: tos_in = ltos; break; + case Bytecodes::_f2i: // fall through + case Bytecodes::_f2l: // fall through + case Bytecodes::_f2d: tos_in = ftos; break; + case Bytecodes::_d2i: // fall through + case Bytecodes::_d2l: // fall through + case Bytecodes::_d2f: tos_in = dtos; break; + default : ShouldNotReachHere(); + } + switch (bytecode()) { + case Bytecodes::_l2i: // fall through + case Bytecodes::_f2i: // fall through + case Bytecodes::_d2i: // fall through + case Bytecodes::_i2b: // fall through + case Bytecodes::_i2c: // fall through + case Bytecodes::_i2s: tos_out = itos; break; + case Bytecodes::_i2l: // fall through + case Bytecodes::_f2l: // fall through + case Bytecodes::_d2l: tos_out = ltos; break; + case Bytecodes::_i2f: // fall through + case Bytecodes::_l2f: // fall through + case Bytecodes::_d2f: tos_out = ftos; break; + case Bytecodes::_i2d: // fall through + case Bytecodes::_l2d: // fall through + case Bytecodes::_f2d: tos_out = dtos; break; + default : ShouldNotReachHere(); + } + transition(tos_in, tos_out); + } +#endif // ASSERT + + // Conversion + // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation) + switch (bytecode()) { + case Bytecodes::_i2l: + __ extend_sign(rdx, rax); + break; + case Bytecodes::_i2f: + __ push(rax); // store int on tos + __ fild_s(at_rsp()); // load int to ST0 + __ f2ieee(); // truncate to float size + __ pop(rcx); // adjust rsp + break; + case Bytecodes::_i2d: + __ push(rax); // add one slot for d2ieee() + __ push(rax); // store int on tos + __ fild_s(at_rsp()); // load int to ST0 + __ d2ieee(); // truncate to double size + __ pop(rcx); // adjust rsp + __ pop(rcx); + break; + case Bytecodes::_i2b: + __ shll(rax, 24); // truncate upper 24 bits + __ sarl(rax, 24); // and sign-extend byte + LP64_ONLY(__ movsbl(rax, rax)); + break; + case Bytecodes::_i2c: + __ andl(rax, 0xFFFF); // truncate upper 16 bits + LP64_ONLY(__ movzwl(rax, rax)); + break; + case Bytecodes::_i2s: + __ shll(rax, 16); // 
truncate upper 16 bits + __ sarl(rax, 16); // and sign-extend short + LP64_ONLY(__ movswl(rax, rax)); + break; + case Bytecodes::_l2i: + /* nothing to do */ + break; + case Bytecodes::_l2f: + __ push(rdx); // store long on tos + __ push(rax); + __ fild_d(at_rsp()); // load long to ST0 + __ f2ieee(); // truncate to float size + __ pop(rcx); // adjust rsp + __ pop(rcx); + break; + case Bytecodes::_l2d: + __ push(rdx); // store long on tos + __ push(rax); + __ fild_d(at_rsp()); // load long to ST0 + __ d2ieee(); // truncate to double size + __ pop(rcx); // adjust rsp + __ pop(rcx); + break; + case Bytecodes::_f2i: + __ push(rcx); // reserve space for argument + __ fstp_s(at_rsp()); // pass float argument on stack + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); + break; + case Bytecodes::_f2l: + __ push(rcx); // reserve space for argument + __ fstp_s(at_rsp()); // pass float argument on stack + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); + break; + case Bytecodes::_f2d: + /* nothing to do */ + break; + case Bytecodes::_d2i: + __ push(rcx); // reserve space for argument + __ push(rcx); + __ fstp_d(at_rsp()); // pass double argument on stack + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2); + break; + case Bytecodes::_d2l: + __ push(rcx); // reserve space for argument + __ push(rcx); + __ fstp_d(at_rsp()); // pass double argument on stack + __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2); + break; + case Bytecodes::_d2f: + __ push(rcx); // reserve space for f2ieee() + __ f2ieee(); // truncate to float size + __ pop(rcx); // adjust rsp + break; + default : + ShouldNotReachHere(); + } +#endif +} + +void TemplateTable::lcmp() { + transition(ltos, itos); +#ifdef _LP64 + Label done; + __ pop_l(rdx); + __ cmpq(rdx, rax); + __ movl(rax, -1); + __ jccb(Assembler::less, done); + __ setb(Assembler::notEqual, rax); + __ movzbl(rax, rax); + __ bind(done); +#else + + // y = rdx:rax + __ pop_l(rbx, rcx); // get x = rcx:rbx + __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y) + __ mov(rax, rcx); +#endif +} + +void TemplateTable::float_cmp(bool is_float, int unordered_result) { +#ifdef _LP64 + Label done; + if (is_float) { + // XXX get rid of pop here, use ... reg, mem32 + __ pop_f(xmm1); + __ ucomiss(xmm1, xmm0); + } else { + // XXX get rid of pop here, use ... 
reg, mem64 + __ pop_d(xmm1); + __ ucomisd(xmm1, xmm0); + } + if (unordered_result < 0) { + __ movl(rax, -1); + __ jccb(Assembler::parity, done); + __ jccb(Assembler::below, done); + __ setb(Assembler::notEqual, rdx); + __ movzbl(rax, rdx); + } else { + __ movl(rax, 1); + __ jccb(Assembler::parity, done); + __ jccb(Assembler::above, done); + __ movl(rax, 0); + __ jccb(Assembler::equal, done); + __ decrementl(rax); + } + __ bind(done); +#else + if (is_float) { + __ fld_s(at_rsp()); + } else { + __ fld_d(at_rsp()); + __ pop(rdx); + } + __ pop(rcx); + __ fcmp2int(rax, unordered_result < 0); +#endif +} + +void TemplateTable::branch(bool is_jsr, bool is_wide) { + __ get_method(rcx); // rcx holds method + __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx + // holds bumped taken count + + const ByteSize be_offset = MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset(); + const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset(); + + // Load up edx with the branch displacement + if (is_wide) { + __ movl(rdx, at_bcp(1)); + } else { + __ load_signed_short(rdx, at_bcp(1)); + } + __ bswapl(rdx); + + if (!is_wide) { + __ sarl(rdx, 16); + } + LP64_ONLY(__ movl2ptr(rdx, rdx)); + + // Handle all the JSR stuff here, then exit. + // It's much shorter and cleaner than intermingling with the non-JSR + // normal-branch stuff occurring below. + if (is_jsr) { + // Pre-load the next target bytecode into rbx + __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0)); + + // compute return address as bci in rax + __ lea(rax, at_bcp((is_wide ? 5 : 3) - + in_bytes(ConstMethod::codes_offset()))); + __ subptr(rax, Address(rcx, Method::const_offset())); + // Adjust the bcp in r13 by the displacement in rdx + __ addptr(rbcp, rdx); + // jsr returns atos that is not an oop + __ push_i(rax); + __ dispatch_only(vtos); + return; + } + + // Normal (non-jsr) branch handling + + // Adjust the bcp in r13 by the displacement in rdx + __ addptr(rbcp, rdx); + + assert(UseLoopCounter || !UseOnStackReplacement, + "on-stack-replacement requires loop counters"); + Label backedge_counter_overflow; + Label profile_method; + Label dispatch; + if (UseLoopCounter) { + // increment backedge counter for backward branches + // rax: MDO + // rbx: MDO bumped taken-count + // rcx: method + // rdx: target offset + // r13: target bcp + // r14: locals pointer + __ testl(rdx, rdx); // check if forward or backward branch + __ jcc(Assembler::positive, dispatch); // count only if backward branch + + // check if MethodCounters exists + Label has_counters; + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, has_counters); + __ push(rdx); + __ push(rcx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), + rcx); + __ pop(rcx); + __ pop(rdx); + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ jcc(Assembler::zero, dispatch); + __ bind(has_counters); + + if (TieredCompilation) { + Label no_mdo; + int increment = InvocationCounter::count_increment; + if (ProfileInterpreter) { + // Are we profiling? 
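+        // (If this method already has a MethodData*, its backedge counter and
+        // mask are used; otherwise we fall through to the MethodCounters*
+        // based counter below.  In both cases an overflow of the masked
+        // counter branches to backedge_counter_overflow.)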
+ __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset()))); + __ testptr(rbx, rbx); + __ jccb(Assembler::zero, no_mdo); + // Increment the MDO backedge counter + const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset())); + __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, + rax, false, Assembler::zero, &backedge_counter_overflow); + __ jmp(dispatch); + } + __ bind(no_mdo); + // Increment backedge counter in MethodCounters* + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); + const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset())); + __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, + rax, false, Assembler::zero, &backedge_counter_overflow); + } else { // not TieredCompilation + // increment counter + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); + __ movl(rax, Address(rcx, be_offset)); // load backedge counter + __ incrementl(rax, InvocationCounter::count_increment); // increment counter + __ movl(Address(rcx, be_offset), rax); // store counter + + __ movl(rax, Address(rcx, inv_offset)); // load invocation counter + + __ andl(rax, InvocationCounter::count_mask_value); // and the status bits + __ addl(rax, Address(rcx, be_offset)); // add both counters + + if (ProfileInterpreter) { + // Test to see if we should create a method data oop + __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset()))); + __ jcc(Assembler::less, dispatch); + + // if no method data exists, go to profile method + __ test_method_data_pointer(rax, profile_method); + + if (UseOnStackReplacement) { + // check for overflow against rbx which is the MDO taken count + __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()))); + __ jcc(Assembler::below, dispatch); + + // When ProfileInterpreter is on, the backedge_count comes + // from the MethodData*, which value does not get reset on + // the call to frequency_counter_overflow(). To avoid + // excessive calls to the overflow routine while the method is + // being compiled, add a second test to make sure the overflow + // function is called only once every overflow_frequency. + const int overflow_frequency = 1024; + __ andl(rbx, overflow_frequency - 1); + __ jcc(Assembler::zero, backedge_counter_overflow); + + } + } else { + if (UseOnStackReplacement) { + // check for overflow against rax, which is the sum of the + // counters + __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()))); + __ jcc(Assembler::aboveEqual, backedge_counter_overflow); + + } + } + } + __ bind(dispatch); + } + + // Pre-load the next target bytecode into rbx + __ load_unsigned_byte(rbx, Address(rbcp, 0)); + + // continue with the bytecode @ target + // rax: return bci for jsr's, unused otherwise + // rbx: target bytecode + // r13: target bcp + __ dispatch_only(vtos); + + if (UseLoopCounter) { + if (ProfileInterpreter) { + // Out-of-line code to allocate method data oop. 
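+      // (profile_method calls into the runtime to allocate the MethodData*,
+      // re-establishes the method data pointer for the current bcp, and then
+      // jumps back to the normal dispatch path above.)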
+ __ bind(profile_method); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); + __ load_unsigned_byte(rbx, Address(rbcp, 0)); // restore target bytecode + __ set_method_data_pointer_for_bcp(); + __ jmp(dispatch); + } + + if (UseOnStackReplacement) { + // invocation counter overflow + __ bind(backedge_counter_overflow); + __ negptr(rdx); + __ addptr(rdx, rbcp); // branch bcp + // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::frequency_counter_overflow), + rdx); + __ load_unsigned_byte(rbx, Address(rbcp, 0)); // restore target bytecode + + // rax: osr nmethod (osr ok) or NULL (osr not possible) + // rbx: target bytecode + // rdx: scratch + // r14: locals pointer + // r13: bcp + __ testptr(rax, rax); // test result + __ jcc(Assembler::zero, dispatch); // no osr if null + // nmethod may have been invalidated (VM may block upon call_VM return) + __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use); + __ jcc(Assembler::notEqual, dispatch); + + // We have the address of an on stack replacement routine in rax + // We need to prepare to execute the OSR method. First we must + // migrate the locals and monitors off of the stack. + + LP64_ONLY(__ mov(r13, rax)); // save the nmethod + NOT_LP64(__ mov(rbx, rax)); // save the nmethod + NOT_LP64(__ get_thread(rcx)); + + call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); + + // rax is OSR buffer, move it to expected parameter location + LP64_ONLY(__ mov(j_rarg0, rax)); + NOT_LP64(__ mov(rcx, rax)); + // We use j_rarg definitions here so that registers don't conflict as parameter + // registers change across platforms as we are in the midst of a calling + // sequence to the OSR nmethod and we don't want collision. These are NOT parameters. + + const Register retaddr = LP64_ONLY(j_rarg2) NOT_LP64(rdi); + const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx); + + + // pop the interpreter frame + __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp + __ leave(); // remove frame anchor + __ pop(retaddr); // get return address + __ mov(rsp, sender_sp); // set sp to sender sp + // Ensure compiled code always sees stack at proper alignment + __ andptr(rsp, -(StackAlignmentInBytes)); + + // unlike x86 we need no specialized return from compiled code + // to the interpreter or the call stub. 
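+      // (At this point the interpreter frame has been popped: rsp was reset to
+      // the caller's sender sp and the return address sits in retaddr, while
+      // the OSR buffer returned by OSR_migration_begin() is already in the
+      // first argument register, so we can enter the nmethod at its OSR entry.)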
+ + // push the return address + __ push(retaddr); + + // and begin the OSR nmethod + LP64_ONLY(__ jmp(Address(r13, nmethod::osr_entry_point_offset()))); + NOT_LP64(__ jmp(Address(rbx, nmethod::osr_entry_point_offset()))); + } + } +} + +void TemplateTable::if_0cmp(Condition cc) { + transition(itos, vtos); + // assume branch is more often taken than not (loops use backward branches) + Label not_taken; + __ testl(rax, rax); + __ jcc(j_not(cc), not_taken); + branch(false, false); + __ bind(not_taken); + __ profile_not_taken_branch(rax); +} + +void TemplateTable::if_icmp(Condition cc) { + transition(itos, vtos); + // assume branch is more often taken than not (loops use backward branches) + Label not_taken; + __ pop_i(rdx); + __ cmpl(rdx, rax); + __ jcc(j_not(cc), not_taken); + branch(false, false); + __ bind(not_taken); + __ profile_not_taken_branch(rax); +} + +void TemplateTable::if_nullcmp(Condition cc) { + transition(atos, vtos); + // assume branch is more often taken than not (loops use backward branches) + Label not_taken; + __ testptr(rax, rax); + __ jcc(j_not(cc), not_taken); + branch(false, false); + __ bind(not_taken); + __ profile_not_taken_branch(rax); +} + +void TemplateTable::if_acmp(Condition cc) { + transition(atos, vtos); + // assume branch is more often taken than not (loops use backward branches) + Label not_taken; + __ pop_ptr(rdx); + __ cmpptr(rdx, rax); + __ jcc(j_not(cc), not_taken); + branch(false, false); + __ bind(not_taken); + __ profile_not_taken_branch(rax); +} + +void TemplateTable::ret() { + transition(vtos, vtos); + locals_index(rbx); + LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp + NOT_LP64(__ movptr(rbx, iaddress(rbx))); + __ profile_ret(rbx, rcx); + __ get_method(rax); + __ movptr(rbcp, Address(rax, Method::const_offset())); + __ lea(rbcp, Address(rbcp, rbx, Address::times_1, + ConstMethod::codes_offset())); + __ dispatch_next(vtos); +} + +void TemplateTable::wide_ret() { + transition(vtos, vtos); + locals_index_wide(rbx); + __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp + __ profile_ret(rbx, rcx); + __ get_method(rax); + __ movptr(rbcp, Address(rax, Method::const_offset())); + __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset())); + __ dispatch_next(vtos); +} + +void TemplateTable::tableswitch() { + Label default_case, continue_execution; + transition(itos, vtos); + + // align r13/rsi + __ lea(rbx, at_bcp(BytesPerInt)); + __ andptr(rbx, -BytesPerInt); + // load lo & hi + __ movl(rcx, Address(rbx, BytesPerInt)); + __ movl(rdx, Address(rbx, 2 * BytesPerInt)); + __ bswapl(rcx); + __ bswapl(rdx); + // check against lo & hi + __ cmpl(rax, rcx); + __ jcc(Assembler::less, default_case); + __ cmpl(rax, rdx); + __ jcc(Assembler::greater, default_case); + // lookup dispatch offset + __ subl(rax, rcx); + __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); + __ profile_switch_case(rax, rbx, rcx); + // continue execution + __ bind(continue_execution); + __ bswapl(rdx); + LP64_ONLY(__ movl2ptr(rdx, rdx)); + __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1)); + __ addptr(rbcp, rdx); + __ dispatch_only(vtos); + // handle default + __ bind(default_case); + __ profile_switch_default(rax); + __ movl(rdx, Address(rbx, 0)); + __ jmp(continue_execution); +} + +void TemplateTable::lookupswitch() { + transition(itos, itos); + __ stop("lookupswitch bytecode should have been rewritten"); +} + +void TemplateTable::fast_linearswitch() { + transition(itos, vtos); + Label 
loop_entry, loop, found, continue_execution; + // bswap rax so we can avoid bswapping the table entries + __ bswapl(rax); + // align r13 + __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of + // this instruction (change offsets + // below) + __ andptr(rbx, -BytesPerInt); + // set counter + __ movl(rcx, Address(rbx, BytesPerInt)); + __ bswapl(rcx); + __ jmpb(loop_entry); + // table search + __ bind(loop); + __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt)); + __ jcc(Assembler::equal, found); + __ bind(loop_entry); + __ decrementl(rcx); + __ jcc(Assembler::greaterEqual, loop); + // default case + __ profile_switch_default(rax); + __ movl(rdx, Address(rbx, 0)); + __ jmp(continue_execution); + // entry found -> get offset + __ bind(found); + __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt)); + __ profile_switch_case(rcx, rax, rbx); + // continue execution + __ bind(continue_execution); + __ bswapl(rdx); + __ movl2ptr(rdx, rdx); + __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1)); + __ addptr(rbcp, rdx); + __ dispatch_only(vtos); +} + +void TemplateTable::fast_binaryswitch() { + transition(itos, vtos); + // Implementation using the following core algorithm: + // + // int binary_search(int key, LookupswitchPair* array, int n) { + // // Binary search according to "Methodik des Programmierens" by + // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. + // int i = 0; + // int j = n; + // while (i+1 < j) { + // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) + // // with Q: for all i: 0 <= i < n: key < a[i] + // // where a stands for the array and assuming that the (inexisting) + // // element a[n] is infinitely big. + // int h = (i + j) >> 1; + // // i < h < j + // if (key < array[h].fast_match()) { + // j = h; + // } else { + // i = h; + // } + // } + // // R: a[i] <= key < a[i+1] or Q + // // (i.e., if key is within array, i is the correct index) + // return i; + // } + + // Register allocation + const Register key = rax; // already set (tosca) + const Register array = rbx; + const Register i = rcx; + const Register j = rdx; + const Register h = rdi; + const Register temp = rsi; + + // Find array start + NOT_LP64(__ save_bcp()); + + __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to + // get rid of this + // instruction (change + // offsets below) + __ andptr(array, -BytesPerInt); + + // Initialize i & j + __ xorl(i, i); // i = 0; + __ movl(j, Address(array, -BytesPerInt)); // j = length(array); + + // Convert j into native byteordering + __ bswapl(j); + + // And start + Label entry; + __ jmp(entry); + + // binary search loop + { + Label loop; + __ bind(loop); + // int h = (i + j) >> 1; + __ leal(h, Address(i, j, Address::times_1)); // h = i + j; + __ sarl(h, 1); // h = (i + j) >> 1; + // if (key < array[h].fast_match()) { + // j = h; + // } else { + // i = h; + // } + // Convert array[h].match to native byte-ordering before compare + __ movl(temp, Address(array, h, Address::times_8)); + __ bswapl(temp); + __ cmpl(key, temp); + // j = h if (key < array[h].fast_match()) + __ cmov32(Assembler::less, j, h); + // i = h if (key >= array[h].fast_match()) + __ cmov32(Assembler::greaterEqual, i, h); + // while (i+1 < j) + __ bind(entry); + __ leal(h, Address(i, 1)); // i+1 + __ cmpl(h, j); // i+1 < j + __ jcc(Assembler::less, loop); + } + + // end of binary search, result index is i (must check again!) 
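+  // (The invariant only guarantees that *if* the key is in the table it is at
+  // index i; the key may be absent.  E.g., purely for illustration, searching
+  // for 5 in match values {3, 7, 10} ends with i at the pair for 3, so the
+  // compare below fails and we take the default offset.)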
+  Label default_case;
+  // Convert array[i].match to native byte-ordering before compare
+  __ movl(temp, Address(array, i, Address::times_8));
+  __ bswapl(temp);
+  __ cmpl(key, temp);
+  __ jcc(Assembler::notEqual, default_case);
+
+  // entry found -> j = offset
+  __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
+  __ profile_switch_case(i, key, array);
+  __ bswapl(j);
+  LP64_ONLY(__ movslq(j, j));
+
+  NOT_LP64(__ restore_bcp());
+  NOT_LP64(__ restore_locals()); // restore rdi
+
+  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
+  __ addptr(rbcp, j);
+  __ dispatch_only(vtos);
+
+  // default case -> j = default offset
+  __ bind(default_case);
+  __ profile_switch_default(i);
+  __ movl(j, Address(array, -2 * BytesPerInt));
+  __ bswapl(j);
+  LP64_ONLY(__ movslq(j, j));
+
+  NOT_LP64(__ restore_bcp());
+  NOT_LP64(__ restore_locals());
+
+  __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
+  __ addptr(rbcp, j);
+  __ dispatch_only(vtos);
+}
+
+void TemplateTable::_return(TosState state) {
+  transition(state, state);
+
+  Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
+
+  assert(_desc->calls_vm(),
+         "inconsistent calls_vm information"); // call in remove_activation
+
+  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
+    assert(state == vtos, "only valid state");
+    __ movptr(robj, aaddress(0));
+    __ load_klass(rdi, robj);
+    __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
+    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
+    Label skip_register_finalizer;
+    __ jcc(Assembler::zero, skip_register_finalizer);
+
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
+
+    __ bind(skip_register_finalizer);
+  }
+
+  __ remove_activation(state, rbcp);
+  __ jmp(rbcp);
+}
+
+// ----------------------------------------------------------------------------
+// Volatile variables demand their effects be made known to all CPUs
+// in order.  Store buffers on most chips allow reads & writes to
+// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
+// without some kind of memory barrier (i.e., it's not sufficient that
+// the interpreter does not reorder volatile references, the hardware
+// also must not reorder them).
+//
+// According to the new Java Memory Model (JMM):
+// (1) All volatiles are serialized wrt to each other.  ALSO reads &
+//     writes act as acquire & release, so:
+// (2) A read cannot let unrelated NON-volatile memory refs that
+//     happen after the read float up to before the read.  It's OK for
+//     non-volatile memory refs that happen before the volatile read to
+//     float down below it.
+// (3) Similarly, a volatile write cannot let unrelated NON-volatile
+//     memory refs that happen BEFORE the write float down to after the
+//     write.  It's OK for non-volatile memory refs that happen after the
+//     volatile write to float up before it.
+//
+// We only put in barriers around volatile refs (they are expensive),
+// not _between_ memory refs (that would require us to track the
+// flavor of the previous memory refs).  Requirements (2) and (3)
+// require some barriers before volatile stores and after volatile
+// loads.  These nearly cover requirement (1) but miss the
+// volatile-store-volatile-load case.  This final case is placed after
+// volatile-stores although it could just as well go before
+// volatile-loads.
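+//
+// For example, a volatile field store generated by putfield_or_static() or
+// fast_storefield() below is followed by
+//
+//   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+//                                                Assembler::StoreStore));
+//
+// while volatile_barrier() itself degenerates to a no-op on uniprocessors
+// (see the os::is_MP() check).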
+ +void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) { + // Helper function to insert a is-volatile test and memory barrier + if(!os::is_MP()) return; // Not needed on single CPU + __ membar(order_constraint); +} + +void TemplateTable::resolve_cache_and_index(int byte_no, + Register Rcache, + Register index, + size_t index_size) { + const Register temp = rbx; + assert_different_registers(Rcache, index, temp); + + Label resolved; + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size); + __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode? + __ jcc(Assembler::equal, resolved); + + // resolve first time through + address entry; + switch (bytecode()) { + case Bytecodes::_getstatic : // fall through + case Bytecodes::_putstatic : // fall through + case Bytecodes::_getfield : // fall through + case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; + case Bytecodes::_invokevirtual : // fall through + case Bytecodes::_invokespecial : // fall through + case Bytecodes::_invokestatic : // fall through + case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; + case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; + case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; + default: + fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); + break; + } + __ movl(temp, (int)bytecode()); + __ call_VM(noreg, entry, temp); + // Update registers with resolved info + __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); + __ bind(resolved); +} + +// The cache and index registers must be set before call +void TemplateTable::load_field_cp_cache_entry(Register obj, + Register cache, + Register index, + Register off, + Register flags, + bool is_static = false) { + assert_different_registers(cache, index, flags, off); + + ByteSize cp_base_offset = ConstantPoolCache::base_offset(); + // Field offset + __ movptr(off, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::f2_offset()))); + // Flags + __ movl(flags, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::flags_offset()))); + + // klass overwrite register + if (is_static) { + __ movptr(obj, Address(cache, index, Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::f1_offset()))); + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); + __ movptr(obj, Address(obj, mirror_offset)); + } +} + +void TemplateTable::load_invoke_cp_cache_entry(int byte_no, + Register method, + Register itable_index, + Register flags, + bool is_invokevirtual, + bool is_invokevfinal, /*unused*/ + bool is_invokedynamic) { + // setup registers + const Register cache = rcx; + const Register index = rdx; + assert_different_registers(method, flags); + assert_different_registers(method, cache, index); + assert_different_registers(itable_index, flags); + assert_different_registers(itable_index, cache, index); + // determine constant pool cache field offsets + assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); + const int method_offset = in_bytes( + ConstantPoolCache::base_offset() + + ((byte_no == f2_byte) + ? 
ConstantPoolCacheEntry::f2_offset() + : ConstantPoolCacheEntry::f1_offset())); + const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::flags_offset()); + // access constant pool cache fields + const int index_offset = in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::f2_offset()); + + size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); + resolve_cache_and_index(byte_no, cache, index, index_size); + __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); + + if (itable_index != noreg) { + // pick up itable or appendix index from f2 also: + __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); + } + __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); +} + +// The registers cache and index expected to be set before call. +// Correct values of the cache and index registers are preserved. +void TemplateTable::jvmti_post_field_access(Register cache, + Register index, + bool is_static, + bool has_tos) { + if (JvmtiExport::can_post_field_access()) { + // Check to see if a field access watch has been set before we take + // the time to call into the VM. + Label L1; + assert_different_registers(cache, index, rax); + __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); + __ testl(rax,rax); + __ jcc(Assembler::zero, L1); + + // cache entry pointer + __ addptr(cache, in_bytes(ConstantPoolCache::base_offset())); + __ shll(index, LogBytesPerWord); + __ addptr(cache, index); + if (is_static) { + __ xorptr(rax, rax); // NULL object reference + } else { + __ pop(atos); // Get the object + __ verify_oop(rax); + __ push(atos); // Restore stack state + } + // rax,: object pointer or NULL + // cache: cache entry pointer + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), + rax, cache); + __ get_cache_and_index_at_bcp(cache, index, 1); + __ bind(L1); + } +} + +void TemplateTable::pop_and_check_object(Register r) { + __ pop_ptr(r); + __ null_check(r); // for field access must check obj. 
+ __ verify_oop(r); +} + +void TemplateTable::getfield_or_static(int byte_no, bool is_static) { + transition(vtos, vtos); + + const Register cache = rcx; + const Register index = rdx; + const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + const Register off = rbx; + const Register flags = rax; + const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them + + resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + jvmti_post_field_access(cache, index, is_static, false); + load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + + if (!is_static) pop_and_check_object(obj); + + const Address field(obj, off, Address::times_1, 0*wordSize); + NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize)); + + Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; + + __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); + // Make sure we don't need to mask edx after the above shift + assert(btos == 0, "change code, btos != 0"); + + __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); + + __ jcc(Assembler::notZero, notByte); + // btos + __ load_signed_byte(rax, field); + __ push(btos); + // Rewrite bytecode to be faster + if (!is_static) { + patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx); + } + __ jmp(Done); + + __ bind(notByte); + __ cmpl(flags, atos); + __ jcc(Assembler::notEqual, notObj); + // atos + __ load_heap_oop(rax, field); + __ push(atos); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); + } + __ jmp(Done); + + __ bind(notObj); + __ cmpl(flags, itos); + __ jcc(Assembler::notEqual, notInt); + // itos + __ movl(rax, field); + __ push(itos); + // Rewrite bytecode to be faster + if (!is_static) { + patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx); + } + __ jmp(Done); + + __ bind(notInt); + __ cmpl(flags, ctos); + __ jcc(Assembler::notEqual, notChar); + // ctos + __ load_unsigned_short(rax, field); + __ push(ctos); + // Rewrite bytecode to be faster + if (!is_static) { + patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx); + } + __ jmp(Done); + + __ bind(notChar); + __ cmpl(flags, stos); + __ jcc(Assembler::notEqual, notShort); + // stos + __ load_signed_short(rax, field); + __ push(stos); + // Rewrite bytecode to be faster + if (!is_static) { + patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx); + } + __ jmp(Done); + + __ bind(notShort); + __ cmpl(flags, ltos); + __ jcc(Assembler::notEqual, notLong); + // ltos + +#ifndef _LP64 + // Generate code as if volatile. There just aren't enough registers to + // save that information and this code is faster than the test. 
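+  // (A volatile jlong must be read in one 64-bit access; two 32-bit loads
+  // could observe a torn value.  fild_d/fistp_d below give a single 64-bit
+  // memory read and write even on 32-bit x86, which is why the FPU is used
+  // here instead of a pair of movl instructions.)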
+ __ fild_d(field); // Must load atomically + __ subptr(rsp,2*wordSize); // Make space for store + __ fistp_d(Address(rsp,0)); + __ pop(rax); + __ pop(rdx); +#else + __ movq(rax, field); +#endif + + __ push(ltos); + // Rewrite bytecode to be faster + LP64_ONLY(if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx)); + __ jmp(Done); + + __ bind(notLong); + __ cmpl(flags, ftos); + __ jcc(Assembler::notEqual, notFloat); + // ftos + + LP64_ONLY(__ movflt(xmm0, field)); + NOT_LP64(__ fld_s(field)); + __ push(ftos); + // Rewrite bytecode to be faster + if (!is_static) { + patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx); + } + __ jmp(Done); + + __ bind(notFloat); +#ifdef ASSERT + __ cmpl(flags, dtos); + __ jcc(Assembler::notEqual, notDouble); +#endif + // dtos + LP64_ONLY(__ movdbl(xmm0, field)); + NOT_LP64(__ fld_d(field)); + __ push(dtos); + // Rewrite bytecode to be faster + if (!is_static) { + patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx); + } +#ifdef ASSERT + __ jmp(Done); + + + __ bind(notDouble); + __ stop("Bad state"); +#endif + + __ bind(Done); + // [jk] not needed currently + // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad | + // Assembler::LoadStore)); +} + +void TemplateTable::getfield(int byte_no) { + getfield_or_static(byte_no, false); +} + +void TemplateTable::getstatic(int byte_no) { + getfield_or_static(byte_no, true); +} + + +// The registers cache and index expected to be set before call. +// The function may destroy various registers, just not the cache and index registers. +void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { + + const Register robj = LP64_ONLY(c_rarg2) NOT_LP64(rax); + const Register RBX = LP64_ONLY(c_rarg1) NOT_LP64(rbx); + const Register RCX = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + const Register RDX = LP64_ONLY(rscratch1) NOT_LP64(rdx); + + ByteSize cp_base_offset = ConstantPoolCache::base_offset(); + + if (JvmtiExport::can_post_field_modification()) { + // Check to see if a field modification watch has been set before + // we take the time to call into the VM. + Label L1; + assert_different_registers(cache, index, rax); + __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); + __ testl(rax, rax); + __ jcc(Assembler::zero, L1); + + __ get_cache_and_index_at_bcp(robj, RDX, 1); + + + if (is_static) { + // Life is simple. Null out the object pointer. + __ xorl(RBX, RBX); + + } else { + // Life is harder. The stack holds the value on top, followed by + // the object. We don't know the size of the value, though; it + // could be one or two words depending on its type. As a result, + // we must find the type to determine where the object is. 
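+      // (Roughly: stack is ..., obj, value for one-word values and
+      //  ..., obj, value(2 slots) for long/double, so the object reference is
+      //  one or two slots below tos depending on the type bits in the cache
+      //  entry flags.)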
+#ifndef _LP64 + Label two_word, valsize_known; +#endif + __ movl(RCX, Address(robj, RDX, + Address::times_ptr, + in_bytes(cp_base_offset + + ConstantPoolCacheEntry::flags_offset()))); + NOT_LP64(__ mov(rbx, rsp)); + __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift); + + // Make sure we don't need to mask rcx after the above shift + ConstantPoolCacheEntry::verify_tos_state_shift(); +#ifdef _LP64 + __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue + __ cmpl(c_rarg3, ltos); + __ cmovptr(Assembler::equal, + c_rarg1, at_tos_p2()); // ltos (two word jvalue) + __ cmpl(c_rarg3, dtos); + __ cmovptr(Assembler::equal, + c_rarg1, at_tos_p2()); // dtos (two word jvalue) +#else + __ cmpl(rcx, ltos); + __ jccb(Assembler::equal, two_word); + __ cmpl(rcx, dtos); + __ jccb(Assembler::equal, two_word); + __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos) + __ jmpb(valsize_known); + + __ bind(two_word); + __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue + + __ bind(valsize_known); + // setup object pointer + __ movptr(rbx, Address(rbx, 0)); +#endif + } + // cache entry pointer + __ addptr(robj, in_bytes(cp_base_offset)); + __ shll(RDX, LogBytesPerWord); + __ addptr(robj, RDX); + // object (tos) + __ mov(RCX, rsp); + // c_rarg1: object pointer set up above (NULL if static) + // c_rarg2: cache entry pointer + // c_rarg3: jvalue object on the stack + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::post_field_modification), + RBX, robj, RCX); + __ get_cache_and_index_at_bcp(cache, index, 1); + __ bind(L1); + } +} + +void TemplateTable::putfield_or_static(int byte_no, bool is_static) { + transition(vtos, vtos); + + const Register cache = rcx; + const Register index = rdx; + const Register obj = rcx; + const Register off = rbx; + const Register flags = rax; + const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + + resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + jvmti_post_field_mod(cache, index, is_static); + load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + + // [jk] not needed currently + // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | + // Assembler::StoreStore)); + + Label notVolatile, Done; + __ movl(rdx, flags); + __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + __ andl(rdx, 0x1); + + // field addresses + const Address field(obj, off, Address::times_1, 0*wordSize); + NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);) + + Label notByte, notInt, notShort, notChar, + notLong, notFloat, notObj, notDouble; + + __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); + + assert(btos == 0, "change code, btos != 0"); + __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); + __ jcc(Assembler::notZero, notByte); + + // btos + { + __ pop(btos); + if (!is_static) pop_and_check_object(obj); + __ movb(field, rax); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } + + __ bind(notByte); + __ cmpl(flags, atos); + __ jcc(Assembler::notEqual, notObj); + + // atos + { + __ pop(atos); + if (!is_static) pop_and_check_object(obj); + // Store into the field + do_oop_store(_masm, field, rax, _bs->kind(), false); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } + + __ bind(notObj); + __ cmpl(flags, itos); + __ jcc(Assembler::notEqual, notInt); + + // itos + { + __ pop(itos); + if (!is_static) 
pop_and_check_object(obj); + __ movl(field, rax); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } + + __ bind(notInt); + __ cmpl(flags, ctos); + __ jcc(Assembler::notEqual, notChar); + + // ctos + { + __ pop(ctos); + if (!is_static) pop_and_check_object(obj); + __ movw(field, rax); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } + + __ bind(notChar); + __ cmpl(flags, stos); + __ jcc(Assembler::notEqual, notShort); + + // stos + { + __ pop(stos); + if (!is_static) pop_and_check_object(obj); + __ movw(field, rax); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } + + __ bind(notShort); + __ cmpl(flags, ltos); + __ jcc(Assembler::notEqual, notLong); + + // ltos +#ifdef _LP64 + { + __ pop(ltos); + if (!is_static) pop_and_check_object(obj); + __ movq(field, rax); + if (!is_static) { + patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } +#else + { + Label notVolatileLong; + __ testl(rdx, rdx); + __ jcc(Assembler::zero, notVolatileLong); + + __ pop(ltos); // overwrites rdx, do this after testing volatile. + if (!is_static) pop_and_check_object(obj); + + // Replace with real volatile test + __ push(rdx); + __ push(rax); // Must update atomically with FIST + __ fild_d(Address(rsp,0)); // So load into FPU register + __ fistp_d(field); // and put into memory atomically + __ addptr(rsp, 2*wordSize); + // volatile_barrier(); + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); + // Don't rewrite volatile version + __ jmp(notVolatile); + + __ bind(notVolatileLong); + + __ pop(ltos); // overwrites rdx + if (!is_static) pop_and_check_object(obj); + __ movptr(hi, rdx); + __ movptr(field, rax); + // Don't rewrite to _fast_lputfield for potential volatile case. + __ jmp(notVolatile); + } +#endif // _LP64 + + __ bind(notLong); + __ cmpl(flags, ftos); + __ jcc(Assembler::notEqual, notFloat); + + // ftos + { + __ pop(ftos); + if (!is_static) pop_and_check_object(obj); + NOT_LP64( __ fstp_s(field);) + LP64_ONLY( __ movflt(field, xmm0);) + if (!is_static) { + patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no); + } + __ jmp(Done); + } + + __ bind(notFloat); +#ifdef ASSERT + __ cmpl(flags, dtos); + __ jcc(Assembler::notEqual, notDouble); +#endif + + // dtos + { + __ pop(dtos); + if (!is_static) pop_and_check_object(obj); + NOT_LP64( __ fstp_d(field);) + LP64_ONLY( __ movdbl(field, xmm0);) + if (!is_static) { + patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no); + } + } + +#ifdef ASSERT + __ jmp(Done); + + __ bind(notDouble); + __ stop("Bad state"); +#endif + + __ bind(Done); + + // Check for volatile store + __ testl(rdx, rdx); + __ jcc(Assembler::zero, notVolatile); + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); + __ bind(notVolatile); +} + +void TemplateTable::putfield(int byte_no) { + putfield_or_static(byte_no, false); +} + +void TemplateTable::putstatic(int byte_no) { + putfield_or_static(byte_no, true); +} + +void TemplateTable::jvmti_post_fast_field_mod() { + + const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + + if (JvmtiExport::can_post_field_modification()) { + // Check to see if a field modification watch has been set before + // we take the time to call into the VM. 
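+    // (Same cheap guard as in jvmti_post_field_mod() above: the global watch
+    // counter is tested first so the expensive call_VM that posts the JVMTI
+    // event is only executed when a modification watch is actually active.)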
+ Label L2; + __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); + __ testl(scratch, scratch); + __ jcc(Assembler::zero, L2); + __ pop_ptr(rbx); // copy the object pointer from tos + __ verify_oop(rbx); + __ push_ptr(rbx); // put the object pointer back on tos + // Save tos values before call_VM() clobbers them. Since we have + // to do it for every data type, we use the saved values as the + // jvalue object. + switch (bytecode()) { // load values into the jvalue object + case Bytecodes::_fast_aputfield: __ push_ptr(rax); break; + case Bytecodes::_fast_bputfield: // fall through + case Bytecodes::_fast_sputfield: // fall through + case Bytecodes::_fast_cputfield: // fall through + case Bytecodes::_fast_iputfield: __ push_i(rax); break; + case Bytecodes::_fast_dputfield: __ push_d(); break; + case Bytecodes::_fast_fputfield: __ push_f(); break; + case Bytecodes::_fast_lputfield: __ push_l(rax); break; + + default: + ShouldNotReachHere(); + } + __ mov(scratch, rsp); // points to jvalue on the stack + // access constant pool cache entry + LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1)); + NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1)); + __ verify_oop(rbx); + // rbx: object pointer copied above + // c_rarg2: cache entry pointer + // c_rarg3: jvalue object on the stack + LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3)); + NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx)); + + switch (bytecode()) { // restore tos values + case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break; + case Bytecodes::_fast_bputfield: // fall through + case Bytecodes::_fast_sputfield: // fall through + case Bytecodes::_fast_cputfield: // fall through + case Bytecodes::_fast_iputfield: __ pop_i(rax); break; + case Bytecodes::_fast_dputfield: __ pop_d(); break; + case Bytecodes::_fast_fputfield: __ pop_f(); break; + case Bytecodes::_fast_lputfield: __ pop_l(rax); break; + } + __ bind(L2); + } +} + +void TemplateTable::fast_storefield(TosState state) { + transition(state, vtos); + + ByteSize base = ConstantPoolCache::base_offset(); + + jvmti_post_fast_field_mod(); + + // access constant pool cache + __ get_cache_and_index_at_bcp(rcx, rbx, 1); + + // test for volatile with rdx but rdx is tos register for lputfield. 
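The jvmti_post_fast_field_mod code above re-pushes the value sitting in rax/xmm0 (rax:rdx on 32-bit) onto the expression stack and hands its address to the VM as the jvalue, so the call_VM cannot clobber the value being stored. A small C++ model of that "snapshot tos by bytecode kind" step; JValueSketch and FastPut are ad hoc stand-ins, not JNI's jvalue or Bytecodes::Code:

#include <cstdint>

// Simplified stand-in for the jvalue union handed to the JVMTI callback.
union JValueSketch {
  int32_t i;  int64_t j;  float f;  double d;  void* l;
};

enum class FastPut { aput, iput, lput, fput, dput };

// Save the value that lives in the "tos" register(s) so a VM call cannot
// destroy it; the interpreter later pops it back into the same registers.
JValueSketch snapshot_tos(FastPut kind, int64_t int_reg, double fp_reg, void* oop_reg) {
  JValueSketch v{};
  switch (kind) {
    case FastPut::aput: v.l = oop_reg;                        break;
    case FastPut::iput: v.i = static_cast<int32_t>(int_reg);  break;
    case FastPut::lput: v.j = int_reg;                        break;
    case FastPut::fput: v.f = static_cast<float>(fp_reg);     break;
    case FastPut::dput: v.d = fp_reg;                         break;
  }
  return v;
}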
+ __ movl(rdx, Address(rcx, rbx, Address::times_ptr, + in_bytes(base + + ConstantPoolCacheEntry::flags_offset()))); + + // replace index with field offset from cache entry + __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, + in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); + + // [jk] not needed currently + // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | + // Assembler::StoreStore)); + + Label notVolatile; + __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + __ andl(rdx, 0x1); + + // Get object from stack + pop_and_check_object(rcx); + + // field address + const Address field(rcx, rbx, Address::times_1); + + // access field + switch (bytecode()) { + case Bytecodes::_fast_aputfield: + do_oop_store(_masm, field, rax, _bs->kind(), false); + break; + case Bytecodes::_fast_lputfield: +#ifdef _LP64 + __ movq(field, rax); +#else + __ stop("should not be rewritten"); +#endif + break; + case Bytecodes::_fast_iputfield: + __ movl(field, rax); + break; + case Bytecodes::_fast_bputfield: + __ movb(field, rax); + break; + case Bytecodes::_fast_sputfield: + // fall through + case Bytecodes::_fast_cputfield: + __ movw(field, rax); + break; + case Bytecodes::_fast_fputfield: + NOT_LP64( __ fstp_s(field); ) + LP64_ONLY( __ movflt(field, xmm0);) + break; + case Bytecodes::_fast_dputfield: + NOT_LP64( __ fstp_d(field); ) + LP64_ONLY( __ movdbl(field, xmm0);) + break; + default: + ShouldNotReachHere(); + } + + // Check for volatile store + __ testl(rdx, rdx); + __ jcc(Assembler::zero, notVolatile); + volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | + Assembler::StoreStore)); + __ bind(notVolatile); +} + +void TemplateTable::fast_accessfield(TosState state) { + transition(atos, state); + + // Do the JVMTI work here to avoid disturbing the register state below + if (JvmtiExport::can_post_field_access()) { + // Check to see if a field access watch has been set before we + // take the time to call into the VM. 
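The fast_storefield switch above picks the store instruction purely from the fast bytecode that was patched in earlier, since the field type was already resolved when the bytecode was quickened. A compact C++ sketch of that mapping; FastPutKind is an ad hoc enum, not HotSpot's Bytecodes::Code:

#include <cstddef>

enum class FastPutKind { bput, cput, sput, iput, lput, fput, dput, aput };

// Bytes written at (object + field_offset) for each fast put bytecode.
std::size_t store_width(FastPutKind k, std::size_t oop_size) {
  switch (k) {
    case FastPutKind::bput: return 1;              // movb
    case FastPutKind::cput:
    case FastPutKind::sput: return 2;              // movw
    case FastPutKind::iput:
    case FastPutKind::fput: return 4;              // movl / movflt
    case FastPutKind::lput:
    case FastPutKind::dput: return 8;              // movq / movdbl (64-bit only for lput)
    case FastPutKind::aput: return oop_size;       // oop store goes through the barrier
  }
  return 0;
}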
+ Label L1; + __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); + __ testl(rcx, rcx); + __ jcc(Assembler::zero, L1); + // access constant pool cache entry + LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1)); + NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1)); + __ verify_oop(rax); + __ push_ptr(rax); // save object pointer before call_VM() clobbers it + LP64_ONLY(__ mov(c_rarg1, rax)); + // c_rarg1: object pointer copied above + // c_rarg2: cache entry pointer + LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2)); + NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx)); + __ pop_ptr(rax); // restore object pointer + __ bind(L1); + } + + // access constant pool cache + __ get_cache_and_index_at_bcp(rcx, rbx, 1); + // replace index with field offset from cache entry + // [jk] not needed currently + // if (os::is_MP()) { + // __ movl(rdx, Address(rcx, rbx, Address::times_8, + // in_bytes(ConstantPoolCache::base_offset() + + // ConstantPoolCacheEntry::flags_offset()))); + // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + // __ andl(rdx, 0x1); + // } + __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::f2_offset()))); + + // rax: object + __ verify_oop(rax); + __ null_check(rax); + Address field(rax, rbx, Address::times_1); + + // access field + switch (bytecode()) { + case Bytecodes::_fast_agetfield: + __ load_heap_oop(rax, field); + __ verify_oop(rax); + break; + case Bytecodes::_fast_lgetfield: +#ifdef _LP64 + __ movq(rax, field); +#else + __ stop("should not be rewritten"); +#endif + break; + case Bytecodes::_fast_igetfield: + __ movl(rax, field); + break; + case Bytecodes::_fast_bgetfield: + __ movsbl(rax, field); + break; + case Bytecodes::_fast_sgetfield: + __ load_signed_short(rax, field); + break; + case Bytecodes::_fast_cgetfield: + __ load_unsigned_short(rax, field); + break; + case Bytecodes::_fast_fgetfield: + LP64_ONLY(__ movflt(xmm0, field)); + NOT_LP64(__ fld_s(field)); + break; + case Bytecodes::_fast_dgetfield: + LP64_ONLY(__ movdbl(xmm0, field)); + NOT_LP64(__ fld_d(field)); + break; + default: + ShouldNotReachHere(); + } + // [jk] not needed currently + // if (os::is_MP()) { + // Label notVolatile; + // __ testl(rdx, rdx); + // __ jcc(Assembler::zero, notVolatile); + // __ membar(Assembler::LoadLoad); + // __ bind(notVolatile); + //}; +} + +void TemplateTable::fast_xaccess(TosState state) { + transition(vtos, state); + + // get receiver + __ movptr(rax, aaddress(0)); + // access constant pool cache + __ get_cache_and_index_at_bcp(rcx, rdx, 2); + __ movptr(rbx, + Address(rcx, rdx, Address::times_ptr, + in_bytes(ConstantPoolCache::base_offset() + + ConstantPoolCacheEntry::f2_offset()))); + // make sure exception is reported in correct bcp range (getfield is + // next instruction) + __ increment(rbcp); + __ null_check(rax); + const Address field = Address(rax, rbx, Address::times_1, 0*wordSize); + switch (state) { + case itos: + __ movl(rax, field); + break; + case atos: + __ load_heap_oop(rax, field); + __ verify_oop(rax); + break; + case ftos: + LP64_ONLY(__ movflt(xmm0, field)); + NOT_LP64(__ fld_s(field)); + break; + default: + ShouldNotReachHere(); + } + + // [jk] not needed currently + // if (os::is_MP()) { + // Label notVolatile; + // __ movl(rdx, Address(rcx, rdx, Address::times_8, + // 
in_bytes(ConstantPoolCache::base_offset() + + // ConstantPoolCacheEntry::flags_offset()))); + // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); + // __ testl(rdx, 0x1); + // __ jcc(Assembler::zero, notVolatile); + // __ membar(Assembler::LoadLoad); + // __ bind(notVolatile); + // } + + __ decrement(rbcp); +} + +//----------------------------------------------------------------------------- +// Calls + +void TemplateTable::count_calls(Register method, Register temp) { + // implemented elsewhere + ShouldNotReachHere(); +} + +void TemplateTable::prepare_invoke(int byte_no, + Register method, // linked method (or i-klass) + Register index, // itable index, MethodType, etc. + Register recv, // if caller wants to see it + Register flags // if caller wants to test it + ) { + // determine flags + const Bytecodes::Code code = bytecode(); + const bool is_invokeinterface = code == Bytecodes::_invokeinterface; + const bool is_invokedynamic = code == Bytecodes::_invokedynamic; + const bool is_invokehandle = code == Bytecodes::_invokehandle; + const bool is_invokevirtual = code == Bytecodes::_invokevirtual; + const bool is_invokespecial = code == Bytecodes::_invokespecial; + const bool load_receiver = (recv != noreg); + const bool save_flags = (flags != noreg); + assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); + assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); + assert(flags == noreg || flags == rdx, ""); + assert(recv == noreg || recv == rcx, ""); + + // setup registers & access constant pool cache + if (recv == noreg) recv = rcx; + if (flags == noreg) flags = rdx; + assert_different_registers(method, index, recv, flags); + + // save 'interpreter return address' + __ save_bcp(); + + load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); + + // maybe push appendix to arguments (just before return address) + if (is_invokedynamic || is_invokehandle) { + Label L_no_push; + __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); + __ jcc(Assembler::zero, L_no_push); + // Push the appendix as a trailing parameter. + // This must be done before we get the receiver, + // since the parameter_size includes it. + __ push(rbx); + __ mov(rbx, index); + assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); + __ load_resolved_reference_at_index(index, rbx); + __ pop(rbx); + __ push(index); // push appendix (MethodType, CallSite, etc.) 
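prepare_invoke (above, continuing below) repeatedly decodes the single cp-cache flags word: the has_appendix bit decides whether the resolved appendix is pushed, the low bits give the parameter size used to find the receiver, and the top tos_state bits later select the return entry. A sketch of that decoding in plain C++; the bit positions follow the layout documented in cpCache.hpp for this era but should be read as illustrative, not authoritative:

#include <cstdint>

struct InvokeFlags {
  uint32_t tos_state;       // return-type category, selects the return entry
  bool     has_appendix;    // push resolved_references appendix before the receiver?
  bool     is_vfinal;       // f2 holds a Method* instead of a vtable index
  uint32_t parameter_size;  // argument slots (receiver included), locates the receiver
};

InvokeFlags decode(uint32_t flags) {
  constexpr uint32_t kTosStateShift     = 28;   // ConstantPoolCacheEntry::tos_state_shift
  constexpr uint32_t kHasAppendixShift  = 24;   // ...::has_appendix_shift
  constexpr uint32_t kIsVfinalShift     = 20;   // ...::is_vfinal_shift
  constexpr uint32_t kParameterSizeMask = 0xFF; // ...::parameter_size_mask
  return InvokeFlags{
    flags >> kTosStateShift,
    ((flags >> kHasAppendixShift) & 1u) != 0,
    ((flags >> kIsVfinalShift) & 1u) != 0,
    flags & kParameterSizeMask,
  };
}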
+ __ bind(L_no_push); + } + + // load receiver if needed (after appendix is pushed so parameter size is correct) + // Note: no return address pushed yet + if (load_receiver) { + __ movl(recv, flags); + __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); + const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address + const int receiver_is_at_end = -1; // back off one slot to get receiver + Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); + __ movptr(recv, recv_addr); + __ verify_oop(recv); + } + + if (save_flags) { + __ movl(rbcp, flags); + } + + // compute return type + __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); + // Make sure we don't need to mask flags after the above shift + ConstantPoolCacheEntry::verify_tos_state_shift(); + // load return address + { + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); + ExternalAddress table(table_addr); + LP64_ONLY(__ lea(rscratch1, table)); + LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr))); + NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)))); + } + + // push return address + __ push(flags); + + // Restore flags value from the constant pool cache, and restore rsi + // for later null checks. r13 is the bytecode pointer + if (save_flags) { + __ movl(flags, rbcp); + __ restore_bcp(); + } +} + +void TemplateTable::invokevirtual_helper(Register index, + Register recv, + Register flags) { + // Uses temporary registers rax, rdx + assert_different_registers(index, recv, rax, rdx); + assert(index == rbx, ""); + assert(recv == rcx, ""); + + // Test for an invoke of a final method + Label notFinal; + __ movl(rax, flags); + __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); + __ jcc(Assembler::zero, notFinal); + + const Register method = index; // method must be rbx + assert(method == rbx, + "Method* must be rbx for interpreter calling convention"); + + // do the call - the index is actually the method to call + // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method* + + // It's final, need a null check here! 
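In the load_receiver block above, argument_address(recv, -1 + -1) resolves to the receiver slot because no return address has been pushed yet and the receiver is the last of parameter_size stack slots. Under the assumption that the expression-stack element size equals the word size (true for this port), the arithmetic reduces to the sketch below; the helper name is hypothetical:

#include <cstddef>
#include <cstdint>

// Receiver address before the return address is pushed: slot 0 is the last
// argument pushed, and parameter_size counts the receiver itself, so the
// receiver sits (parameter_size - 1) slots above rsp.
uintptr_t receiver_address(uintptr_t rsp, unsigned parameter_size,
                           std::size_t word_size = sizeof(void*)) {
  // parameter_size >= 1 whenever a receiver exists.
  return rsp + (parameter_size - 1) * word_size;
}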
+ __ null_check(recv); + + // profile this call + __ profile_final_call(rax); + __ profile_arguments_type(rax, method, rbcp, true); + + __ jump_from_interpreted(method, rax); + + __ bind(notFinal); + + // get receiver klass + __ null_check(recv, oopDesc::klass_offset_in_bytes()); + __ load_klass(rax, recv); + + // profile this call + __ profile_virtual_call(rax, rlocals, rdx); + // get target Method* & entry point + __ lookup_virtual_method(rax, index, method); + __ profile_arguments_type(rdx, method, rbcp, true); + __ jump_from_interpreted(method, rdx); +} + +void TemplateTable::invokevirtual(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f2_byte, "use this argument"); + prepare_invoke(byte_no, + rbx, // method or vtable index + noreg, // unused itable index + rcx, rdx); // recv, flags + + // rbx: index + // rcx: receiver + // rdx: flags + + invokevirtual_helper(rbx, rcx, rdx); +} + +void TemplateTable::invokespecial(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + prepare_invoke(byte_no, rbx, noreg, // get f1 Method* + rcx); // get receiver also for null check + __ verify_oop(rcx); + __ null_check(rcx); + // do the call + __ profile_call(rax); + __ profile_arguments_type(rax, rbx, rbcp, false); + __ jump_from_interpreted(rbx, rax); +} + +void TemplateTable::invokestatic(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + prepare_invoke(byte_no, rbx); // get f1 Method* + // do the call + __ profile_call(rax); + __ profile_arguments_type(rax, rbx, rbcp, false); + __ jump_from_interpreted(rbx, rax); +} + + +void TemplateTable::fast_invokevfinal(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f2_byte, "use this argument"); + __ stop("fast_invokevfinal not used on x86"); +} + + +void TemplateTable::invokeinterface(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index + rcx, rdx); // recv, flags + + // rax: interface klass (from f1) + // rbx: itable index (from f2) + // rcx: receiver + // rdx: flags + + // Special case of invokeinterface called for virtual method of + // java.lang.Object. See cpCacheOop.cpp for details. + // This code isn't produced by javac, but could be produced by + // another compliant java compiler. + Label notMethod; + __ movl(rlocals, rdx); + __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); + + __ jcc(Assembler::zero, notMethod); + + invokevirtual_helper(rbx, rcx, rdx); + __ bind(notMethod); + + // Get receiver klass into rdx - also a null check + __ restore_locals(); // restore r14 + __ null_check(rcx, oopDesc::klass_offset_in_bytes()); + __ load_klass(rdx, rcx); + + // profile this call + __ profile_virtual_call(rdx, rbcp, rlocals); + + Label no_such_interface, no_such_method; + + __ lookup_interface_method(// inputs: rec. class, interface, itable index + rdx, rax, rbx, + // outputs: method, scan temp. reg + rbx, rbcp, + no_such_interface); + + // rbx: Method* to call + // rcx: receiver + // Check for abstract method error + // Note: This should be done more efficiently via a throw_abstract_method_error + // interpreter entry point and a conditional jump to it in case of a null + // method. 
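invokevirtual_helper above has two outcomes: if the cp-cache entry is marked vfinal, f2 already names the target Method* and only a receiver null check is needed; otherwise f2 is a vtable index applied to the receiver's klass via lookup_virtual_method. A toy C++ model of that split; Method and Klass here are stand-ins, not HotSpot metadata types:

#include <cstddef>
#include <vector>

struct Method { /* body irrelevant for the sketch */ };
struct Klass  { std::vector<Method*> vtable; };

// is_vfinal: call f2 directly; otherwise index the receiver klass's vtable.
Method* select_target(bool is_vfinal, std::size_t f2_as_index,
                      Method* f2_as_method, const Klass* receiver_klass) {
  if (is_vfinal) {
    return f2_as_method;                         // final method: no table lookup
  }
  return receiver_klass->vtable[f2_as_index];    // lookup_virtual_method()
}

invokeinterface reuses the same helper when the is_forced_virtual bit marks a java.lang.Object method invoked through an interface reference.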
+ __ testptr(rbx, rbx); + __ jcc(Assembler::zero, no_such_method); + + __ profile_arguments_type(rdx, rbx, rbcp, true); + + // do the call + // rcx: receiver + // rbx,: Method* + __ jump_from_interpreted(rbx, rdx); + __ should_not_reach_here(); + + // exception handling code follows... + // note: must restore interpreter registers to canonical + // state for exception handling to work correctly! + + __ bind(no_such_method); + // throw exception + __ pop(rbx); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed) + __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); + + __ bind(no_such_interface); + // throw exception + __ pop(rbx); // pop return address (pushed by prepare_invoke) + __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed) + __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); +} + +void TemplateTable::invokehandle(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + const Register rbx_method = rbx; + const Register rax_mtype = rax; + const Register rcx_recv = rcx; + const Register rdx_flags = rdx; + + prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv); + __ verify_method_ptr(rbx_method); + __ verify_oop(rcx_recv); + __ null_check(rcx_recv); + + // rax: MethodType object (from cpool->resolved_references[f1], if necessary) + // rbx: MH.invokeExact_MT method (from f2) + + // Note: rax_mtype is already pushed (if necessary) by prepare_invoke + + // FIXME: profile the LambdaForm also + __ profile_final_call(rax); + __ profile_arguments_type(rdx, rbx_method, rbcp, true); + + __ jump_from_interpreted(rbx_method, rdx); +} + +void TemplateTable::invokedynamic(int byte_no) { + transition(vtos, vtos); + assert(byte_no == f1_byte, "use this argument"); + + const Register rbx_method = rbx; + const Register rax_callsite = rax; + + prepare_invoke(byte_no, rbx_method, rax_callsite); + + // rax: CallSite object (from cpool->resolved_references[f1]) + // rbx: MH.linkToCallSite method (from f2) + + // Note: rax_callsite is already pushed by prepare_invoke + + // %%% should make a type profile for any invokedynamic that takes a ref argument + // profile this call + __ profile_call(rbcp); + __ profile_arguments_type(rdx, rbx_method, rbcp, false); + + __ verify_oop(rax_callsite); + + __ jump_from_interpreted(rbx_method, rdx); +} + +//----------------------------------------------------------------------------- +// Allocation + +void TemplateTable::_new() { + transition(vtos, atos); + __ get_unsigned_2_byte_index_at_bcp(rdx, 1); + Label slow_case; + Label slow_case_no_pop; + Label done; + Label initialize_header; + Label initialize_object; // including clearing the fields + Label allocate_shared; + + __ get_cpool_and_tags(rcx, rax); + + // Make sure the class we're about to instantiate has been resolved. 
+ // This is done before loading InstanceKlass to be consistent with the order + // how Constant Pool is updated (see ConstantPool::klass_at_put) + const int tags_offset = Array::base_offset_in_bytes(); + __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class); + __ jcc(Assembler::notEqual, slow_case_no_pop); + + // get InstanceKlass + __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool))); + __ push(rcx); // save the contexts of klass for initializing the header + + // make sure klass is initialized & doesn't have finalizer + // make sure klass is fully initialized + __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); + __ jcc(Assembler::notEqual, slow_case); + + // get instance_size in InstanceKlass (scaled to a count of bytes) + __ movl(rdx, Address(rcx, Klass::layout_helper_offset())); + // test to see if it has a finalizer or is malformed in some way + __ testl(rdx, Klass::_lh_instance_slow_path_bit); + __ jcc(Assembler::notZero, slow_case); + + // + // Allocate the instance + // 1) Try to allocate in the TLAB + // 2) if fail and the object is large allocate in the shared Eden + // 3) if the above fails (or is not applicable), go to a slow case + // (creates a new TLAB, etc.) + + const bool allow_shared_alloc = + Universe::heap()->supports_inline_contig_alloc(); + + const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx); +#ifndef _LP64 + if (UseTLAB || allow_shared_alloc) { + __ get_thread(thread); + } +#endif // _LP64 + + if (UseTLAB) { + __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); + __ lea(rbx, Address(rax, rdx, Address::times_1)); + __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset()))); + __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); + __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx); + if (ZeroTLAB) { + // the fields have been already cleared + __ jmp(initialize_header); + } else { + // initialize both the header and fields + __ jmp(initialize_object); + } + } + + // Allocation in the shared Eden, if allowed. + // + // rdx: instance size in bytes + if (allow_shared_alloc) { + __ bind(allocate_shared); + + ExternalAddress heap_top((address)Universe::heap()->top_addr()); + ExternalAddress heap_end((address)Universe::heap()->end_addr()); + + Label retry; + __ bind(retry); + __ movptr(rax, heap_top); + __ lea(rbx, Address(rax, rdx, Address::times_1)); + __ cmpptr(rbx, heap_end); + __ jcc(Assembler::above, slow_case); + + // Compare rax, with the top addr, and if still equal, store the new + // top addr in rbx, at the address of the top addr pointer. Sets ZF if was + // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. + // + // rax,: object begin + // rbx,: object end + // rdx: instance size in bytes + __ locked_cmpxchgptr(rbx, heap_top); + + // if someone beat us on the allocation, try again, otherwise continue + __ jcc(Assembler::notEqual, retry); + + __ incr_allocated_bytes(thread, rdx, 0); + } + + if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { + // The object is initialized before the header. If the object size is + // zero, go directly to the header initialization. + __ bind(initialize_object); + __ decrement(rdx, sizeof(oopDesc)); + __ jcc(Assembler::zero, initialize_header); + + // Initialize topmost object field, divide rdx by 8, check if odd and + // test if zero. 
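Before the field-initialization code that follows, _new tries two inline allocation paths: a private TLAB bump (no atomics) and, if that fails and inline contiguous allocation is supported, a CAS retry loop on the shared top pointer (the locked_cmpxchgptr above). A minimal C++ sketch of both paths under simplified types; TlabSketch is hypothetical, not JavaThread's TLAB:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct TlabSketch { uintptr_t top; uintptr_t end; };

// Thread-local bump: purely private, so a plain update suffices.
uintptr_t tlab_allocate(TlabSketch& tlab, std::size_t size_in_bytes) {
  uintptr_t obj = tlab.top;
  if (obj + size_in_bytes > tlab.end) return 0;   // fall back to shared/slow path
  tlab.top = obj + size_in_bytes;
  return obj;
}

// Shared eden: retry the compare-and-swap until we install our new top or run
// out of space, mirroring the retry label around locked_cmpxchgptr.
uintptr_t shared_eden_allocate(std::atomic<uintptr_t>& heap_top,
                               uintptr_t heap_end, std::size_t size_in_bytes) {
  uintptr_t obj = heap_top.load();
  for (;;) {
    uintptr_t new_top = obj + size_in_bytes;
    if (new_top > heap_end) return 0;             // genuine slow case (new TLAB, GC, ...)
    if (heap_top.compare_exchange_weak(obj, new_top)) return obj;
    // CAS failed: obj now holds the winner's top, try again.
  }
}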
+ __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) + __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd + + // rdx must have been multiple of 8 +#ifdef ASSERT + // make sure rdx was multiple of 8 + Label L; + // Ignore partial flag stall after shrl() since it is debug VM + __ jccb(Assembler::carryClear, L); + __ stop("object size is not multiple of 2 - adjust this code"); + __ bind(L); + // rdx must be > 0, no extra check needed here +#endif + + // initialize remaining object fields: rdx was a multiple of 8 + { Label loop; + __ bind(loop); + __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); + NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx)); + __ decrement(rdx); + __ jcc(Assembler::notZero, loop); + } + + // initialize object header only. + __ bind(initialize_header); + if (UseBiasedLocking) { + __ pop(rcx); // get saved klass back in the register. + __ movptr(rbx, Address(rcx, Klass::prototype_header_offset())); + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx); + } else { + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), + (intptr_t)markOopDesc::prototype()); // header + __ pop(rcx); // get saved klass back in the register. + } +#ifdef _LP64 + __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) + __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops +#endif + __ store_klass(rax, rcx); // klass + + { + SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); + // Trigger dtrace event for fastpath + __ push(atos); + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); + __ pop(atos); + } + + __ jmp(done); + } + + // slow case + __ bind(slow_case); + __ pop(rcx); // restore stack pointer to what it was when we came in. 
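The zeroing loop above divides the non-header size by eight and walks the index down to zero, clearing the object from its end toward the header (the 32-bit template does each 8-byte stride as two 4-byte stores). A C++ sketch of the same traversal, assuming the size is already known to be a multiple of eight as the ASSERT block checks:

#include <cstddef>
#include <cstdint>

void clear_fields(void* obj, std::size_t header_bytes, std::size_t instance_bytes) {
  std::size_t count = (instance_bytes - header_bytes) / sizeof(uint64_t);
  uint64_t* base = reinterpret_cast<uint64_t*>(
      static_cast<char*>(obj) + header_bytes);
  for (std::size_t i = count; i > 0; --i) {
    base[i - 1] = 0;   // movptr(Address(rax, rdx, times_8, header - wordSize), zero)
  }
}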
+ __ bind(slow_case_no_pop); + + Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax); + Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx); + + __ get_constant_pool(rarg1); + __ get_unsigned_2_byte_index_at_bcp(rarg2, 1); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2); + __ verify_oop(rax); + + // continue + __ bind(done); +} + +void TemplateTable::newarray() { + transition(itos, atos); + Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx); + __ load_unsigned_byte(rarg1, at_bcp(1)); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), + rarg1, rax); +} + +void TemplateTable::anewarray() { + transition(itos, atos); + + Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx); + Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx); + + __ get_unsigned_2_byte_index_at_bcp(rarg2, 1); + __ get_constant_pool(rarg1); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), + rarg1, rarg2, rax); +} + +void TemplateTable::arraylength() { + transition(atos, itos); + __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); + __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); +} + +void TemplateTable::checkcast() { + transition(atos, atos); + Label done, is_null, ok_is_subtype, quicked, resolved; + __ testptr(rax, rax); // object is in rax + __ jcc(Assembler::zero, is_null); + + // Get cpool & tags index + __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array + __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index + // See if bytecode has already been quicked + __ cmpb(Address(rdx, rbx, + Address::times_1, + Array::base_offset_in_bytes()), + JVM_CONSTANT_Class); + __ jcc(Assembler::equal, quicked); + __ push(atos); // save receiver for result, and for GC + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); + + // vm_result_2 has metadata result +#ifndef _LP64 + // borrow rdi from locals + __ get_thread(rdi); + __ get_vm_result_2(rax, rdi); + __ restore_locals(); +#else + __ get_vm_result_2(rax, r15_thread); +#endif + + __ pop_ptr(rdx); // restore receiver + __ jmpb(resolved); + + // Get superklass in rax and subklass in rbx + __ bind(quicked); + __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check + __ movptr(rax, Address(rcx, rbx, + Address::times_ptr, sizeof(ConstantPool))); + + __ bind(resolved); + __ load_klass(rbx, rdx); + + // Generate subtype check. Blows rcx, rdi. Object in rdx. + // Superklass in rax. Subklass in rbx. + __ gen_subtype_check(rbx, ok_is_subtype); + + // Come here on failure + __ push_ptr(rdx); + // object is at TOS + __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); + + // Come here on success + __ bind(ok_is_subtype); + __ mov(rax, rdx); // Restore object in rdx + + // Collect counts on whether this check-cast sees NULLs a lot or not. 
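The checkcast template above (and the instanceof template that follows, including the ProfileInterpreter null-count branch) boils down to: null always passes checkcast and always yields 0 for instanceof; otherwise the resolved class is tested against the object's klass via gen_subtype_check. A behavioural C++ sketch with toy single-inheritance klasses; the real subtype check also handles interfaces and arrays through the secondary-supers machinery:

#include <stdexcept>

struct KlassSketch {
  const KlassSketch* super;   // toy superclass chain only
};

bool is_subtype(const KlassSketch* sub, const KlassSketch* target) {
  for (const KlassSketch* k = sub; k != nullptr; k = k->super)
    if (k == target) return true;
  return false;
}

const void* checkcast(const void* obj, const KlassSketch* obj_klass,
                      const KlassSketch* target) {
  if (obj == nullptr) return obj;                     // null passes untouched
  if (!is_subtype(obj_klass, target))
    throw std::runtime_error("ClassCastException");   // _throw_ClassCastException_entry
  return obj;
}

int instance_of(const void* obj, const KlassSketch* obj_klass,
                const KlassSketch* target) {
  return obj != nullptr && is_subtype(obj_klass, target) ? 1 : 0;
}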
+ if (ProfileInterpreter) { + __ jmp(done); + __ bind(is_null); + __ profile_null_seen(rcx); + } else { + __ bind(is_null); // same as 'done' + } + __ bind(done); +} + +void TemplateTable::instanceof() { + transition(atos, itos); + Label done, is_null, ok_is_subtype, quicked, resolved; + __ testptr(rax, rax); + __ jcc(Assembler::zero, is_null); + + // Get cpool & tags index + __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array + __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index + // See if bytecode has already been quicked + __ cmpb(Address(rdx, rbx, + Address::times_1, + Array::base_offset_in_bytes()), + JVM_CONSTANT_Class); + __ jcc(Assembler::equal, quicked); + + __ push(atos); // save receiver for result, and for GC + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); + // vm_result_2 has metadata result + +#ifndef _LP64 + // borrow rdi from locals + __ get_thread(rdi); + __ get_vm_result_2(rax, rdi); + __ restore_locals(); +#else + __ get_vm_result_2(rax, r15_thread); +#endif + + __ pop_ptr(rdx); // restore receiver + __ verify_oop(rdx); + __ load_klass(rdx, rdx); + __ jmpb(resolved); + + // Get superklass in rax and subklass in rdx + __ bind(quicked); + __ load_klass(rdx, rax); + __ movptr(rax, Address(rcx, rbx, + Address::times_ptr, sizeof(ConstantPool))); + + __ bind(resolved); + + // Generate subtype check. Blows rcx, rdi + // Superklass in rax. Subklass in rdx. + __ gen_subtype_check(rdx, ok_is_subtype); + + // Come here on failure + __ xorl(rax, rax); + __ jmpb(done); + // Come here on success + __ bind(ok_is_subtype); + __ movl(rax, 1); + + // Collect counts on whether this test sees NULLs a lot or not. + if (ProfileInterpreter) { + __ jmp(done); + __ bind(is_null); + __ profile_null_seen(rcx); + } else { + __ bind(is_null); // same as 'done' + } + __ bind(done); + // rax = 0: obj == NULL or obj is not an instanceof the specified klass + // rax = 1: obj != NULL and obj is an instanceof the specified klass +} + + +//---------------------------------------------------------------------------------------------------- +// Breakpoints +void TemplateTable::_breakpoint() { + // Note: We get here even if we are single stepping.. + // jbug insists on setting breakpoints at every bytecode + // even if we are in single step mode. + + transition(vtos, vtos); + + Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx); + + // get the unpatched byte code + __ get_method(rarg); + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, + InterpreterRuntime::get_original_bytecode_at), + rarg, rbcp); + __ mov(rbx, rax); // why? + + // post the breakpoint event + __ get_method(rarg); + __ call_VM(noreg, + CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), + rarg, rbcp); + + // complete the execution of original bytecode + __ dispatch_only_normal(vtos); +} + +//----------------------------------------------------------------------------- +// Exceptions + +void TemplateTable::athrow() { + transition(atos, vtos); + __ null_check(rax); + __ jump(ExternalAddress(Interpreter::throw_exception_entry())); +} + +//----------------------------------------------------------------------------- +// Synchronization +// +// Note: monitorenter & exit are symmetric routines; which is reflected +// in the assembly code structure as well +// +// Stack layout: +// +// [expressions ] <--- rsp = expression stack top +// .. +// [expressions ] +// [monitor entry] <--- monitor block top = expression stack bot +// .. +// [monitor entry] +// [frame data ] <--- monitor block bot +// ... 
+// [saved rbp ] <--- rbp +void TemplateTable::monitorenter() { + transition(atos, vtos); + + // check for NULL object + __ null_check(rax); + + const Address monitor_block_top( + rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); + const Address monitor_block_bot( + rbp, frame::interpreter_frame_initial_sp_offset * wordSize); + const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; + + Label allocated; + + Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx); + Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx); + Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx); + + // initialize entry pointer + __ xorl(rmon, rmon); // points to free slot or NULL + + // find a free slot in the monitor block (result in rmon) + { + Label entry, loop, exit; + __ movptr(rtop, monitor_block_top); // points to current entry, + // starting with top-most entry + __ lea(rbot, monitor_block_bot); // points to word before bottom + // of monitor block + __ jmpb(entry); + + __ bind(loop); + // check if current entry is used + __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); + // if not used then remember entry in rmon + __ cmovptr(Assembler::equal, rmon, rtop); // cmov => cmovptr + // check if current entry is for same object + __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes())); + // if same object then stop searching + __ jccb(Assembler::equal, exit); + // otherwise advance to next entry + __ addptr(rtop, entry_size); + __ bind(entry); + // check if bottom reached + __ cmpptr(rtop, rbot); + // if not at bottom then check this entry + __ jcc(Assembler::notEqual, loop); + __ bind(exit); + } + + __ testptr(rmon, rmon); // check if a slot has been found + __ jcc(Assembler::notZero, allocated); // if found, continue with that one + + // allocate one if there's no free slot + { + Label entry, loop; + // 1. compute new pointers // rsp: old expression stack top + __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom + __ subptr(rsp, entry_size); // move expression stack top + __ subptr(rmon, entry_size); // move expression stack bottom + __ mov(rtop, rsp); // set start value for copy loop + __ movptr(monitor_block_bot, rmon); // set new monitor block bottom + __ jmp(entry); + // 2. move expression stack contents + __ bind(loop); + __ movptr(rbot, Address(rtop, entry_size)); // load expression stack + // word from old location + __ movptr(Address(rtop, 0), rbot); // and store it at new location + __ addptr(rtop, wordSize); // advance to next word + __ bind(entry); + __ cmpptr(rtop, rmon); // check if bottom reached + __ jcc(Assembler::notEqual, loop); // if not at bottom then + // copy next word + } + + // call run-time routine + // rmon: points to monitor entry + __ bind(allocated); + + // Increment bcp to point to the next bytecode, so exception + // handling for async. exceptions work correctly. + // The object has already been poped from the stack, so the + // expression stack looks correct. + __ increment(rbcp); + + // store object + __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax); + __ lock_object(rmon); + + // check to make sure this monitor doesn't cause stack overflow after locking + __ save_bcp(); // in case of exception + __ generate_stack_overflow_check(0); + + // The bcp has already been incremented. Just need to dispatch to + // next instruction. 
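The monitorenter slot search above walks the monitor block from the top-most entry toward the bottom, remembering a free entry (obj == NULL) via the conditional move and stopping early once it sees an entry for the same object; if nothing free was found, the caller grows the block by sliding the expression stack down one entry. A C++ sketch of that scan; MonitorEntrySketch is a stand-in for BasicObjectLock:

#include <cstddef>

struct MonitorEntrySketch { const void* obj; /* lock word omitted */ };

// Returns a reusable free entry, or nullptr if the caller must allocate one.
MonitorEntrySketch* find_monitor_slot(MonitorEntrySketch* top,
                                      MonitorEntrySketch* bottom,
                                      const void* locking_obj) {
  MonitorEntrySketch* free_slot = nullptr;
  for (MonitorEntrySketch* e = top; e != bottom; ++e) {
    if (e->obj == nullptr)     free_slot = e;  // cmovptr: remember a free entry
    if (e->obj == locking_obj) break;          // same object: stop searching
  }
  return free_slot;
}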
+ __ dispatch_next(vtos); +} + +void TemplateTable::monitorexit() { + transition(atos, vtos); + + // check for NULL object + __ null_check(rax); + + const Address monitor_block_top( + rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); + const Address monitor_block_bot( + rbp, frame::interpreter_frame_initial_sp_offset * wordSize); + const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; + + Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx); + Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx); + + Label found; + + // find matching slot + { + Label entry, loop; + __ movptr(rtop, monitor_block_top); // points to current entry, + // starting with top-most entry + __ lea(rbot, monitor_block_bot); // points to word before bottom + // of monitor block + __ jmpb(entry); + + __ bind(loop); + // check if current entry is for same object + __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes())); + // if same object then stop searching + __ jcc(Assembler::equal, found); + // otherwise advance to next entry + __ addptr(rtop, entry_size); + __ bind(entry); + // check if bottom reached + __ cmpptr(rtop, rbot); + // if not at bottom then check this entry + __ jcc(Assembler::notEqual, loop); + } + + // error handling. Unlocking was not block-structured + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_illegal_monitor_state_exception)); + __ should_not_reach_here(); + + // call run-time routine + __ bind(found); + __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) + __ unlock_object(rtop); + __ pop_ptr(rax); // discard object +} + +// Wide instructions +void TemplateTable::wide() { + transition(vtos, vtos); + __ load_unsigned_byte(rbx, at_bcp(1)); + ExternalAddress wtable((address)Interpreter::_wentry_point); + __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr))); + // Note: the rbcp increment step is part of the individual wide bytecode implementations +} + +// Multi arrays +void TemplateTable::multianewarray() { + transition(vtos, atos); + + Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax); + __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions + // last dim is on top of stack; we want address of first one: + // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize + // the latter wordSize to point to the beginning of the array. + __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize)); + call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg); + __ load_unsigned_byte(rbx, at_bcp(3)); + __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts +} +#endif /* !CC_INTERP */ + diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp --- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,3668 +0,0 @@ -/* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.hpp" -#include "interpreter/interpreter.hpp" -#include "interpreter/interpreterRuntime.hpp" -#include "interpreter/interp_masm.hpp" -#include "interpreter/templateTable.hpp" -#include "memory/universe.inline.hpp" -#include "oops/methodData.hpp" -#include "oops/objArrayKlass.hpp" -#include "oops/oop.inline.hpp" -#include "prims/methodHandles.hpp" -#include "runtime/sharedRuntime.hpp" -#include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" -#include "utilities/macros.hpp" - -#ifndef CC_INTERP -#define __ _masm-> - -//---------------------------------------------------------------------------------------------------- -// Platform-dependent initialization - -void TemplateTable::pd_initialize() { - // No i486 specific initialization -} - -//---------------------------------------------------------------------------------------------------- -// Address computation - -// local variables -static inline Address iaddress(int n) { - return Address(rdi, Interpreter::local_offset_in_bytes(n)); -} - -static inline Address laddress(int n) { return iaddress(n + 1); } -static inline Address haddress(int n) { return iaddress(n + 0); } -static inline Address faddress(int n) { return iaddress(n); } -static inline Address daddress(int n) { return laddress(n); } -static inline Address aaddress(int n) { return iaddress(n); } - -static inline Address iaddress(Register r) { - return Address(rdi, r, Interpreter::stackElementScale()); -} -static inline Address laddress(Register r) { - return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1)); -} -static inline Address haddress(Register r) { - return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0)); -} - -static inline Address faddress(Register r) { return iaddress(r); } -static inline Address daddress(Register r) { return laddress(r); } -static inline Address aaddress(Register r) { return iaddress(r); } - -// expression stack -// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store -// data beyond the rsp which is potentially unsafe in an MT environment; -// an interrupt may overwrite that data.) -static inline Address at_rsp () { - return Address(rsp, 0); -} - -// At top of Java expression stack which may be different than rsp(). It -// isn't for category 1 objects. 
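In the removed 32-bit file, the local-variable helpers encode the two-slot layout of longs and doubles: laddress(n) is iaddress(n + 1) and haddress(n) is iaddress(n + 0), so the low 32 bits of a category-2 local are accessed through slot n + 1 and the high 32 bits through slot n. A trivial C++ restatement of just that slot arithmetic (slot numbering abstract, addressing direction ignored):

struct LongSlots { int low_slot; int high_slot; };

// Which slots the 32-bit interpreter touches for a long/double local n.
LongSlots long_local_slots(int n) {
  return LongSlots{ n + 1, n };   // low word via laddress(n), high word via haddress(n)
}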
-static inline Address at_tos () { - Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0)); - return tos; -} - -static inline Address at_tos_p1() { - return Address(rsp, Interpreter::expr_offset_in_bytes(1)); -} - -static inline Address at_tos_p2() { - return Address(rsp, Interpreter::expr_offset_in_bytes(2)); -} - -// Condition conversion -static Assembler::Condition j_not(TemplateTable::Condition cc) { - switch (cc) { - case TemplateTable::equal : return Assembler::notEqual; - case TemplateTable::not_equal : return Assembler::equal; - case TemplateTable::less : return Assembler::greaterEqual; - case TemplateTable::less_equal : return Assembler::greater; - case TemplateTable::greater : return Assembler::lessEqual; - case TemplateTable::greater_equal: return Assembler::less; - } - ShouldNotReachHere(); - return Assembler::zero; -} - - -//---------------------------------------------------------------------------------------------------- -// Miscelaneous helper routines - -// Store an oop (or NULL) at the address described by obj. -// If val == noreg this means store a NULL - -static void do_oop_store(InterpreterMacroAssembler* _masm, - Address obj, - Register val, - BarrierSet::Name barrier, - bool precise) { - assert(val == noreg || val == rax, "parameter is just for looks"); - switch (barrier) { -#if INCLUDE_ALL_GCS - case BarrierSet::G1SATBCT: - case BarrierSet::G1SATBCTLogging: - { - // flatten object address if needed - // We do it regardless of precise because we need the registers - if (obj.index() == noreg && obj.disp() == 0) { - if (obj.base() != rdx) { - __ movl(rdx, obj.base()); - } - } else { - __ leal(rdx, obj); - } - __ get_thread(rcx); - __ save_bcp(); - __ g1_write_barrier_pre(rdx /* obj */, - rbx /* pre_val */, - rcx /* thread */, - rsi /* tmp */, - val != noreg /* tosca_live */, - false /* expand_call */); - - // Do the actual store - // noreg means NULL - if (val == noreg) { - __ movptr(Address(rdx, 0), NULL_WORD); - // No post barrier for NULL - } else { - __ movl(Address(rdx, 0), val); - __ g1_write_barrier_post(rdx /* store_adr */, - val /* new_val */, - rcx /* thread */, - rbx /* tmp */, - rsi /* tmp2 */); - } - __ restore_bcp(); - - } - break; -#endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: - case BarrierSet::CardTableExtension: - { - if (val == noreg) { - __ movptr(obj, NULL_WORD); - } else { - __ movl(obj, val); - // flatten object address if needed - if (!precise || (obj.index() == noreg && obj.disp() == 0)) { - __ store_check(obj.base()); - } else { - __ leal(rdx, obj); - __ store_check(rdx); - } - } - } - break; - case BarrierSet::ModRef: - if (val == noreg) { - __ movptr(obj, NULL_WORD); - } else { - __ movl(obj, val); - } - break; - default : - ShouldNotReachHere(); - - } -} - -Address TemplateTable::at_bcp(int offset) { - assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); - return Address(rsi, offset); -} - - -void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, - Register temp_reg, bool load_bc_into_bc_reg/*=true*/, - int byte_no) { - if (!RewriteBytecodes) return; - Label L_patch_done; - - switch (bc) { - case Bytecodes::_fast_aputfield: - case Bytecodes::_fast_bputfield: - case Bytecodes::_fast_cputfield: - case Bytecodes::_fast_dputfield: - case Bytecodes::_fast_fputfield: - case Bytecodes::_fast_iputfield: - case Bytecodes::_fast_lputfield: - case Bytecodes::_fast_sputfield: - { - // We skip bytecode quickening for putfield instructions when - // the put_code written to the constant pool cache is 
zero. - // This is required so that every execution of this instruction - // calls out to InterpreterRuntime::resolve_get_put to do - // additional, required work. - assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); - assert(load_bc_into_bc_reg, "we use bc_reg as temp"); - __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1); - __ movl(bc_reg, bc); - __ cmpl(temp_reg, (int) 0); - __ jcc(Assembler::zero, L_patch_done); // don't patch - } - break; - default: - assert(byte_no == -1, "sanity"); - // the pair bytecodes have already done the load. - if (load_bc_into_bc_reg) { - __ movl(bc_reg, bc); - } - } - - if (JvmtiExport::can_post_breakpoint()) { - Label L_fast_patch; - // if a breakpoint is present we can't rewrite the stream directly - __ movzbl(temp_reg, at_bcp(0)); - __ cmpl(temp_reg, Bytecodes::_breakpoint); - __ jcc(Assembler::notEqual, L_fast_patch); - __ get_method(temp_reg); - // Let breakpoint table handling rewrite to quicker bytecode - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg); -#ifndef ASSERT - __ jmpb(L_patch_done); -#else - __ jmp(L_patch_done); -#endif - __ bind(L_fast_patch); - } - -#ifdef ASSERT - Label L_okay; - __ load_unsigned_byte(temp_reg, at_bcp(0)); - __ cmpl(temp_reg, (int)Bytecodes::java_code(bc)); - __ jccb(Assembler::equal, L_okay); - __ cmpl(temp_reg, bc_reg); - __ jcc(Assembler::equal, L_okay); - __ stop("patching the wrong bytecode"); - __ bind(L_okay); -#endif - - // patch bytecode - __ movb(at_bcp(0), bc_reg); - __ bind(L_patch_done); -} - -//---------------------------------------------------------------------------------------------------- -// Individual instructions - -void TemplateTable::nop() { - transition(vtos, vtos); - // nothing to do -} - -void TemplateTable::shouldnotreachhere() { - transition(vtos, vtos); - __ stop("shouldnotreachhere bytecode"); -} - - - -void TemplateTable::aconst_null() { - transition(vtos, atos); - __ xorptr(rax, rax); -} - - -void TemplateTable::iconst(int value) { - transition(vtos, itos); - if (value == 0) { - __ xorptr(rax, rax); - } else { - __ movptr(rax, value); - } -} - - -void TemplateTable::lconst(int value) { - transition(vtos, ltos); - if (value == 0) { - __ xorptr(rax, rax); - } else { - __ movptr(rax, value); - } - assert(value >= 0, "check this code"); - __ xorptr(rdx, rdx); -} - - -void TemplateTable::fconst(int value) { - transition(vtos, ftos); - if (value == 0) { __ fldz(); - } else if (value == 1) { __ fld1(); - } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here - } else { ShouldNotReachHere(); - } -} - - -void TemplateTable::dconst(int value) { - transition(vtos, dtos); - if (value == 0) { __ fldz(); - } else if (value == 1) { __ fld1(); - } else { ShouldNotReachHere(); - } -} - - -void TemplateTable::bipush() { - transition(vtos, itos); - __ load_signed_byte(rax, at_bcp(1)); -} - - -void TemplateTable::sipush() { - transition(vtos, itos); - __ load_unsigned_short(rax, at_bcp(1)); - __ bswapl(rax); - __ sarl(rax, 16); -} - -void TemplateTable::ldc(bool wide) { - transition(vtos, vtos); - Label call_ldc, notFloat, notClass, Done; - - if (wide) { - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); - } else { - __ load_unsigned_byte(rbx, at_bcp(1)); - } - __ get_cpool_and_tags(rcx, rax); - const int base_offset = ConstantPool::header_size() * wordSize; - const int tags_offset = Array::base_offset_in_bytes(); - - // get type - __ xorptr(rdx, 
rdx); - __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset)); - - // unresolved class - get the resolved class - __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); - __ jccb(Assembler::equal, call_ldc); - - // unresolved class in error (resolution failed) - call into runtime - // so that the same error from first resolution attempt is thrown. - __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); - __ jccb(Assembler::equal, call_ldc); - - // resolved class - need to call vm to get java mirror of the class - __ cmpl(rdx, JVM_CONSTANT_Class); - __ jcc(Assembler::notEqual, notClass); - - __ bind(call_ldc); - __ movl(rcx, wide); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx); - __ push(atos); - __ jmp(Done); - - __ bind(notClass); - __ cmpl(rdx, JVM_CONSTANT_Float); - __ jccb(Assembler::notEqual, notFloat); - // ftos - __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset)); - __ push(ftos); - __ jmp(Done); - - __ bind(notFloat); -#ifdef ASSERT - { Label L; - __ cmpl(rdx, JVM_CONSTANT_Integer); - __ jcc(Assembler::equal, L); - // String and Object are rewritten to fast_aldc - __ stop("unexpected tag type in ldc"); - __ bind(L); - } -#endif - // itos JVM_CONSTANT_Integer only - __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset)); - __ push(itos); - __ bind(Done); -} - -// Fast path for caching oop constants. -void TemplateTable::fast_aldc(bool wide) { - transition(vtos, atos); - - Register result = rax; - Register tmp = rdx; - int index_size = wide ? sizeof(u2) : sizeof(u1); - - Label resolved; - - // We are resolved if the resolved reference cache entry contains a - // non-null object (String, MethodType, etc.) - assert_different_registers(result, tmp); - __ get_cache_index_at_bcp(tmp, 1, index_size); - __ load_resolved_reference_at_index(result, tmp); - __ testl(result, result); - __ jcc(Assembler::notZero, resolved); - - address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); - - // first time invocation - must resolve first - __ movl(tmp, (int)bytecode()); - __ call_VM(result, entry, tmp); - - __ bind(resolved); - - if (VerifyOops) { - __ verify_oop(result); - } -} - -void TemplateTable::ldc2_w() { - transition(vtos, vtos); - Label Long, Done; - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); - - __ get_cpool_and_tags(rcx, rax); - const int base_offset = ConstantPool::header_size() * wordSize; - const int tags_offset = Array::base_offset_in_bytes(); - - // get type - __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double); - __ jccb(Assembler::notEqual, Long); - // dtos - __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset)); - __ push(dtos); - __ jmpb(Done); - - __ bind(Long); - // ltos - __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize)); - NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize))); - - __ push(ltos); - - __ bind(Done); -} - - -void TemplateTable::locals_index(Register reg, int offset) { - __ load_unsigned_byte(reg, at_bcp(offset)); - __ negptr(reg); -} - - -void TemplateTable::iload() { - transition(vtos, itos); - if (RewriteFrequentPairs) { - Label rewrite, done; - - // get next byte - __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload))); - // if _iload, wait to rewrite to iload2. We only want to rewrite the - // last two iloads in a pair. Comparing against fast_iload means that - // the next bytecode is neither an iload or a caload, and therefore - // an iload pair. 
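The comment above introduces the pair-rewriting decision that the comparisons below implement: iload inspects the following bytecode and patches itself to fast_iload2, fast_icaload, or plain fast_iload, deferring when the next bytecode is another slow iload so that only the last two loads of a run get fused. A C++ sketch of that decision; Bc is an ad hoc enum, not Bytecodes::Code:

enum class Bc { iload, fast_iload, fast_iload2, caload, fast_icaload, other };

// Bytecode that _iload should be patched to, given the bytecode that follows.
Bc iload_rewrite(Bc next) {
  if (next == Bc::iload)      return Bc::iload;        // defer: rewrite the later one
  if (next == Bc::fast_iload) return Bc::fast_iload2;  // fuse the iload/iload pair
  if (next == Bc::caload)     return Bc::fast_icaload; // fuse the iload/caload pair
  return Bc::fast_iload;                               // no pair, just skip the recheck
}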
- __ cmpl(rbx, Bytecodes::_iload); - __ jcc(Assembler::equal, done); - - __ cmpl(rbx, Bytecodes::_fast_iload); - __ movl(rcx, Bytecodes::_fast_iload2); - __ jccb(Assembler::equal, rewrite); - - // if _caload, rewrite to fast_icaload - __ cmpl(rbx, Bytecodes::_caload); - __ movl(rcx, Bytecodes::_fast_icaload); - __ jccb(Assembler::equal, rewrite); - - // rewrite so iload doesn't check again. - __ movl(rcx, Bytecodes::_fast_iload); - - // rewrite - // rcx: fast bytecode - __ bind(rewrite); - patch_bytecode(Bytecodes::_iload, rcx, rbx, false); - __ bind(done); - } - - // Get the local value into tos - locals_index(rbx); - __ movl(rax, iaddress(rbx)); -} - - -void TemplateTable::fast_iload2() { - transition(vtos, itos); - locals_index(rbx); - __ movl(rax, iaddress(rbx)); - __ push(itos); - locals_index(rbx, 3); - __ movl(rax, iaddress(rbx)); -} - -void TemplateTable::fast_iload() { - transition(vtos, itos); - locals_index(rbx); - __ movl(rax, iaddress(rbx)); -} - - -void TemplateTable::lload() { - transition(vtos, ltos); - locals_index(rbx); - __ movptr(rax, laddress(rbx)); - NOT_LP64(__ movl(rdx, haddress(rbx))); -} - - -void TemplateTable::fload() { - transition(vtos, ftos); - locals_index(rbx); - __ fld_s(faddress(rbx)); -} - - -void TemplateTable::dload() { - transition(vtos, dtos); - locals_index(rbx); - __ fld_d(daddress(rbx)); -} - - -void TemplateTable::aload() { - transition(vtos, atos); - locals_index(rbx); - __ movptr(rax, aaddress(rbx)); -} - - -void TemplateTable::locals_index_wide(Register reg) { - __ load_unsigned_short(reg, at_bcp(2)); - __ bswapl(reg); - __ shrl(reg, 16); - __ negptr(reg); -} - - -void TemplateTable::wide_iload() { - transition(vtos, itos); - locals_index_wide(rbx); - __ movl(rax, iaddress(rbx)); -} - - -void TemplateTable::wide_lload() { - transition(vtos, ltos); - locals_index_wide(rbx); - __ movptr(rax, laddress(rbx)); - NOT_LP64(__ movl(rdx, haddress(rbx))); -} - - -void TemplateTable::wide_fload() { - transition(vtos, ftos); - locals_index_wide(rbx); - __ fld_s(faddress(rbx)); -} - - -void TemplateTable::wide_dload() { - transition(vtos, dtos); - locals_index_wide(rbx); - __ fld_d(daddress(rbx)); -} - - -void TemplateTable::wide_aload() { - transition(vtos, atos); - locals_index_wide(rbx); - __ movptr(rax, aaddress(rbx)); -} - -void TemplateTable::index_check(Register array, Register index) { - // Pop ptr into array - __ pop_ptr(array); - index_check_without_pop(array, index); -} - -void TemplateTable::index_check_without_pop(Register array, Register index) { - // destroys rbx, - // check array - __ null_check(array, arrayOopDesc::length_offset_in_bytes()); - LP64_ONLY(__ movslq(index, index)); - // check index - __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); - if (index != rbx) { - // ??? 
convention: move aberrant index into rbx, for exception message - assert(rbx != array, "different registers"); - __ mov(rbx, index); - } - __ jump_cc(Assembler::aboveEqual, - ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); -} - - -void TemplateTable::iaload() { - transition(itos, itos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT))); -} - - -void TemplateTable::laload() { - transition(itos, ltos); - // rax,: index - // rdx: array - index_check(rdx, rax); - __ mov(rbx, rax); - // rbx,: index - __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize)); - NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize))); -} - - -void TemplateTable::faload() { - transition(itos, ftos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT))); -} - - -void TemplateTable::daload() { - transition(itos, dtos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); -} - - -void TemplateTable::aaload() { - transition(itos, atos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); -} - - -void TemplateTable::baload() { - transition(itos, itos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - // can do better code for P5 - fix this at some point - __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE))); - __ mov(rax, rbx); -} - - -void TemplateTable::caload() { - transition(itos, itos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - // can do better code for P5 - may want to improve this at some point - __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); - __ mov(rax, rbx); -} - -// iload followed by caload frequent pair -void TemplateTable::fast_icaload() { - transition(vtos, itos); - // load index out of locals - locals_index(rbx); - __ movl(rax, iaddress(rbx)); - - // rdx: array - index_check(rdx, rax); - // rax,: index - __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); - __ mov(rax, rbx); -} - -void TemplateTable::saload() { - transition(itos, itos); - // rdx: array - index_check(rdx, rax); // kills rbx, - // rax,: index - // can do better code for P5 - may want to improve this at some point - __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT))); - __ mov(rax, rbx); -} - - -void TemplateTable::iload(int n) { - transition(vtos, itos); - __ movl(rax, iaddress(n)); -} - - -void TemplateTable::lload(int n) { - transition(vtos, ltos); - __ movptr(rax, laddress(n)); - NOT_LP64(__ movptr(rdx, haddress(n))); -} - - -void TemplateTable::fload(int n) { - transition(vtos, ftos); - __ fld_s(faddress(n)); -} - - -void TemplateTable::dload(int n) { - transition(vtos, dtos); - __ fld_d(daddress(n)); -} - - -void TemplateTable::aload(int n) { - transition(vtos, atos); - __ movptr(rax, aaddress(n)); -} - - -void TemplateTable::aload_0() { - 
transition(vtos, atos); - // According to bytecode histograms, the pairs: - // - // _aload_0, _fast_igetfield - // _aload_0, _fast_agetfield - // _aload_0, _fast_fgetfield - // - // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0 - // bytecode checks if the next bytecode is either _fast_igetfield, - // _fast_agetfield or _fast_fgetfield and then rewrites the - // current bytecode into a pair bytecode; otherwise it rewrites the current - // bytecode into _fast_aload_0 that doesn't do the pair check anymore. - // - // Note: If the next bytecode is _getfield, the rewrite must be delayed, - // otherwise we may miss an opportunity for a pair. - // - // Also rewrite frequent pairs - // aload_0, aload_1 - // aload_0, iload_1 - // These bytecodes with a small amount of code are most profitable to rewrite - if (RewriteFrequentPairs) { - Label rewrite, done; - // get next byte - __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); - - // do actual aload_0 - aload(0); - - // if _getfield then wait with rewrite - __ cmpl(rbx, Bytecodes::_getfield); - __ jcc(Assembler::equal, done); - - // if _igetfield then rewrite to _fast_iaccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); - __ cmpl(rbx, Bytecodes::_fast_igetfield); - __ movl(rcx, Bytecodes::_fast_iaccess_0); - __ jccb(Assembler::equal, rewrite); - - // if _agetfield then rewrite to _fast_aaccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); - __ cmpl(rbx, Bytecodes::_fast_agetfield); - __ movl(rcx, Bytecodes::_fast_aaccess_0); - __ jccb(Assembler::equal, rewrite); - - // if _fgetfield then rewrite to _fast_faccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); - __ cmpl(rbx, Bytecodes::_fast_fgetfield); - __ movl(rcx, Bytecodes::_fast_faccess_0); - __ jccb(Assembler::equal, rewrite); - - // else rewrite to _fast_aload0 - assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition"); - __ movl(rcx, Bytecodes::_fast_aload_0); - - // rewrite - // rcx: fast bytecode - __ bind(rewrite); - patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false); - - __ bind(done); - } else { - aload(0); - } -} - -void TemplateTable::istore() { - transition(itos, vtos); - locals_index(rbx); - __ movl(iaddress(rbx), rax); -} - - -void TemplateTable::lstore() { - transition(ltos, vtos); - locals_index(rbx); - __ movptr(laddress(rbx), rax); - NOT_LP64(__ movptr(haddress(rbx), rdx)); -} - - -void TemplateTable::fstore() { - transition(ftos, vtos); - locals_index(rbx); - __ fstp_s(faddress(rbx)); -} - - -void TemplateTable::dstore() { - transition(dtos, vtos); - locals_index(rbx); - __ fstp_d(daddress(rbx)); -} - - -void TemplateTable::astore() { - transition(vtos, vtos); - __ pop_ptr(rax); - locals_index(rbx); - __ movptr(aaddress(rbx), rax); -} - - -void TemplateTable::wide_istore() { - transition(vtos, vtos); - __ pop_i(rax); - locals_index_wide(rbx); - __ movl(iaddress(rbx), rax); -} - - -void TemplateTable::wide_lstore() { - transition(vtos, vtos); - __ pop_l(rax, rdx); - locals_index_wide(rbx); - __ movptr(laddress(rbx), rax); - NOT_LP64(__ movl(haddress(rbx), rdx)); -} - - -void TemplateTable::wide_fstore() { - wide_istore(); -} - - -void TemplateTable::wide_dstore() { - wide_lstore(); -} - - -void TemplateTable::wide_astore() { - transition(vtos, vtos); - __ pop_ptr(rax); -
locals_index_wide(rbx); - __ movptr(aaddress(rbx), rax); -} - - -void TemplateTable::iastore() { - transition(itos, vtos); - __ pop_i(rbx); - // rax,: value - // rdx: array - index_check(rdx, rbx); // prefer index in rbx, - // rbx,: index - __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax); -} - - -void TemplateTable::lastore() { - transition(ltos, vtos); - __ pop_i(rbx); - // rax,: low(value) - // rcx: array - // rdx: high(value) - index_check(rcx, rbx); // prefer index in rbx, - // rbx,: index - __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax); - NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx)); -} - - -void TemplateTable::fastore() { - transition(ftos, vtos); - __ pop_i(rbx); - // rdx: array - // st0: value - index_check(rdx, rbx); // prefer index in rbx, - // rbx,: index - __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT))); -} - - -void TemplateTable::dastore() { - transition(dtos, vtos); - __ pop_i(rbx); - // rdx: array - // st0: value - index_check(rdx, rbx); // prefer index in rbx, - // rbx,: index - __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); -} - - -void TemplateTable::aastore() { - Label is_null, ok_is_subtype, done; - transition(vtos, vtos); - // stack: ..., array, index, value - __ movptr(rax, at_tos()); // Value - __ movl(rcx, at_tos_p1()); // Index - __ movptr(rdx, at_tos_p2()); // Array - - Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); - index_check_without_pop(rdx, rcx); // kills rbx, - // do array store check - check for NULL value first - __ testptr(rax, rax); - __ jcc(Assembler::zero, is_null); - - // Move subklass into EBX - __ load_klass(rbx, rax); - // Move superklass into EAX - __ load_klass(rax, rdx); - __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset())); - // Compress array+index*wordSize+12 into a single register. Frees ECX. - __ lea(rdx, element_address); - - // Generate subtype check. Blows ECX. Resets EDI to locals. - // Superklass in EAX. Subklass in EBX. - __ gen_subtype_check( rbx, ok_is_subtype ); - - // Come here on failure - // object is at TOS - __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); - - // Come here on success - __ bind(ok_is_subtype); - - // Get the value to store - __ movptr(rax, at_rsp()); - // and store it with appropriate barrier - do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true); - - __ jmp(done); - - // Have a NULL in EAX, EDX=array, ECX=index. 
Store NULL at ary[idx] - __ bind(is_null); - __ profile_null_seen(rbx); - - // Store NULL, (noreg means NULL to do_oop_store) - do_oop_store(_masm, element_address, noreg, _bs->kind(), true); - - // Pop stack arguments - __ bind(done); - __ addptr(rsp, 3 * Interpreter::stackElementSize); -} - - -void TemplateTable::bastore() { - transition(itos, vtos); - __ pop_i(rbx); - // rax,: value - // rdx: array - index_check(rdx, rbx); // prefer index in rbx, - // rbx,: index - __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax); -} - - -void TemplateTable::castore() { - transition(itos, vtos); - __ pop_i(rbx); - // rax,: value - // rdx: array - index_check(rdx, rbx); // prefer index in rbx, - // rbx,: index - __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax); -} - - -void TemplateTable::sastore() { - castore(); -} - - -void TemplateTable::istore(int n) { - transition(itos, vtos); - __ movl(iaddress(n), rax); -} - - -void TemplateTable::lstore(int n) { - transition(ltos, vtos); - __ movptr(laddress(n), rax); - NOT_LP64(__ movptr(haddress(n), rdx)); -} - - -void TemplateTable::fstore(int n) { - transition(ftos, vtos); - __ fstp_s(faddress(n)); -} - - -void TemplateTable::dstore(int n) { - transition(dtos, vtos); - __ fstp_d(daddress(n)); -} - - -void TemplateTable::astore(int n) { - transition(vtos, vtos); - __ pop_ptr(rax); - __ movptr(aaddress(n), rax); -} - - -void TemplateTable::pop() { - transition(vtos, vtos); - __ addptr(rsp, Interpreter::stackElementSize); -} - - -void TemplateTable::pop2() { - transition(vtos, vtos); - __ addptr(rsp, 2*Interpreter::stackElementSize); -} - - -void TemplateTable::dup() { - transition(vtos, vtos); - // stack: ..., a - __ load_ptr(0, rax); - __ push_ptr(rax); - // stack: ..., a, a -} - - -void TemplateTable::dup_x1() { - transition(vtos, vtos); - // stack: ..., a, b - __ load_ptr( 0, rax); // load b - __ load_ptr( 1, rcx); // load a - __ store_ptr(1, rax); // store b - __ store_ptr(0, rcx); // store a - __ push_ptr(rax); // push b - // stack: ..., b, a, b -} - - -void TemplateTable::dup_x2() { - transition(vtos, vtos); - // stack: ..., a, b, c - __ load_ptr( 0, rax); // load c - __ load_ptr( 2, rcx); // load a - __ store_ptr(2, rax); // store c in a - __ push_ptr(rax); // push c - // stack: ..., c, b, c, c - __ load_ptr( 2, rax); // load b - __ store_ptr(2, rcx); // store a in b - // stack: ..., c, a, c, c - __ store_ptr(1, rax); // store b in c - // stack: ..., c, a, b, c -} - - -void TemplateTable::dup2() { - transition(vtos, vtos); - // stack: ..., a, b - __ load_ptr(1, rax); // load a - __ push_ptr(rax); // push a - __ load_ptr(1, rax); // load b - __ push_ptr(rax); // push b - // stack: ..., a, b, a, b -} - - -void TemplateTable::dup2_x1() { - transition(vtos, vtos); - // stack: ..., a, b, c - __ load_ptr( 0, rcx); // load c - __ load_ptr( 1, rax); // load b - __ push_ptr(rax); // push b - __ push_ptr(rcx); // push c - // stack: ..., a, b, c, b, c - __ store_ptr(3, rcx); // store c in b - // stack: ..., a, c, c, b, c - __ load_ptr( 4, rcx); // load a - __ store_ptr(2, rcx); // store a in 2nd c - // stack: ..., a, c, a, b, c - __ store_ptr(4, rax); // store b in a - // stack: ..., b, c, a, b, c - // stack: ..., b, c, a, b, c -} - - -void TemplateTable::dup2_x2() { - transition(vtos, vtos); - // stack: ..., a, b, c, d - __ load_ptr( 0, rcx); // load d - __ load_ptr( 1, rax); // load c - __ push_ptr(rax); // push c - __ push_ptr(rcx); // push d - // stack: ..., a, b, c, d, c, 
d - __ load_ptr( 4, rax); // load b - __ store_ptr(2, rax); // store b in d - __ store_ptr(4, rcx); // store d in b - // stack: ..., a, d, c, b, c, d - __ load_ptr( 5, rcx); // load a - __ load_ptr( 3, rax); // load c - __ store_ptr(3, rcx); // store a in c - __ store_ptr(5, rax); // store c in a - // stack: ..., c, d, a, b, c, d - // stack: ..., c, d, a, b, c, d -} - - -void TemplateTable::swap() { - transition(vtos, vtos); - // stack: ..., a, b - __ load_ptr( 1, rcx); // load a - __ load_ptr( 0, rax); // load b - __ store_ptr(0, rcx); // store a in b - __ store_ptr(1, rax); // store b in a - // stack: ..., b, a -} - - -void TemplateTable::iop2(Operation op) { - transition(itos, itos); - switch (op) { - case add : __ pop_i(rdx); __ addl (rax, rdx); break; - case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; - case mul : __ pop_i(rdx); __ imull(rax, rdx); break; - case _and : __ pop_i(rdx); __ andl (rax, rdx); break; - case _or : __ pop_i(rdx); __ orl (rax, rdx); break; - case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break; - case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr. - case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr. - case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr. - default : ShouldNotReachHere(); - } -} - - -void TemplateTable::lop2(Operation op) { - transition(ltos, ltos); - __ pop_l(rbx, rcx); - switch (op) { - case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break; - case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx); - __ mov (rax, rbx); __ mov (rdx, rcx); break; - case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break; - case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break; - case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break; - default : ShouldNotReachHere(); - } -} - - -void TemplateTable::idiv() { - transition(itos, itos); - __ mov(rcx, rax); - __ pop_i(rax); - // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If - // they are not equal, one could do a normal division (no correction - // needed), which may speed up this implementation for the common case. - // (see also JVM spec., p.243 & p.271) - __ corrected_idivl(rcx); -} - - -void TemplateTable::irem() { - transition(itos, itos); - __ mov(rcx, rax); - __ pop_i(rax); - // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If - // they are not equal, one could do a normal division (no correction - // needed), which may speed up this implementation for the common case. 
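The correction that corrected_idivl() applies covers a single special case: on x86, idiv raises a divide-error trap when min_int is divided by -1, whereas the JVM spec requires the quotient to wrap to min_int and the remainder to be 0. A minimal standalone sketch of the required semantics (illustrative helper names, not HotSpot code; division by zero is assumed to be handled by the separate exception path):

    #include <climits>

    // What idiv/irem must compute for the one case the hardware would trap on.
    int java_idiv(int x, int y) {
      if (x == INT_MIN && y == -1) return INT_MIN;  // wrap instead of trapping
      return x / y;
    }

    int java_irem(int x, int y) {
      if (x == INT_MIN && y == -1) return 0;        // remainder is defined as 0 here
      return x % y;
    }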
- // (see also JVM spec., p.243 & p.271) - __ corrected_idivl(rcx); - __ mov(rax, rdx); -} - - -void TemplateTable::lmul() { - transition(ltos, ltos); - __ pop_l(rbx, rcx); - __ push(rcx); __ push(rbx); - __ push(rdx); __ push(rax); - __ lmul(2 * wordSize, 0); - __ addptr(rsp, 4 * wordSize); // take off temporaries -} - - -void TemplateTable::ldiv() { - transition(ltos, ltos); - __ pop_l(rbx, rcx); - __ push(rcx); __ push(rbx); - __ push(rdx); __ push(rax); - // check if y = 0 - __ orl(rax, rdx); - __ jump_cc(Assembler::zero, - ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv)); - __ addptr(rsp, 4 * wordSize); // take off temporaries -} - - -void TemplateTable::lrem() { - transition(ltos, ltos); - __ pop_l(rbx, rcx); - __ push(rcx); __ push(rbx); - __ push(rdx); __ push(rax); - // check if y = 0 - __ orl(rax, rdx); - __ jump_cc(Assembler::zero, - ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem)); - __ addptr(rsp, 4 * wordSize); -} - - -void TemplateTable::lshl() { - transition(itos, ltos); - __ movl(rcx, rax); // get shift count - __ pop_l(rax, rdx); // get shift value - __ lshl(rdx, rax); -} - - -void TemplateTable::lshr() { - transition(itos, ltos); - __ mov(rcx, rax); // get shift count - __ pop_l(rax, rdx); // get shift value - __ lshr(rdx, rax, true); -} - - -void TemplateTable::lushr() { - transition(itos, ltos); - __ mov(rcx, rax); // get shift count - __ pop_l(rax, rdx); // get shift value - __ lshr(rdx, rax); -} - - -void TemplateTable::fop2(Operation op) { - transition(ftos, ftos); - switch (op) { - case add: __ fadd_s (at_rsp()); break; - case sub: __ fsubr_s(at_rsp()); break; - case mul: __ fmul_s (at_rsp()); break; - case div: __ fdivr_s(at_rsp()); break; - case rem: __ fld_s (at_rsp()); __ fremr(rax); break; - default : ShouldNotReachHere(); - } - __ f2ieee(); - __ pop(rax); // pop float thing off -} - - -void TemplateTable::dop2(Operation op) { - transition(dtos, dtos); - - switch (op) { - case add: __ fadd_d (at_rsp()); break; - case sub: __ fsubr_d(at_rsp()); break; - case mul: { - Label L_strict; - Label L_join; - const Address access_flags (rcx, Method::access_flags_offset()); - __ get_method(rcx); - __ movl(rcx, access_flags); - __ testl(rcx, JVM_ACC_STRICT); - __ jccb(Assembler::notZero, L_strict); - __ fmul_d (at_rsp()); - __ jmpb(L_join); - __ bind(L_strict); - __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); - __ fmulp(); - __ fmul_d (at_rsp()); - __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); - __ fmulp(); - __ bind(L_join); - break; - } - case div: { - Label L_strict; - Label L_join; - const Address access_flags (rcx, Method::access_flags_offset()); - __ get_method(rcx); - __ movl(rcx, access_flags); - __ testl(rcx, JVM_ACC_STRICT); - __ jccb(Assembler::notZero, L_strict); - __ fdivr_d(at_rsp()); - __ jmp(L_join); - __ bind(L_strict); - __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); - __ fmul_d (at_rsp()); - __ fdivrp(); - __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); - __ fmulp(); - __ bind(L_join); - break; - } - case rem: __ fld_d (at_rsp()); __ fremr(rax); break; - default : ShouldNotReachHere(); - } - __ d2ieee(); - // Pop double precision number from rsp. 
- __ pop(rax); - __ pop(rdx); -} - - -void TemplateTable::ineg() { - transition(itos, itos); - __ negl(rax); -} - - -void TemplateTable::lneg() { - transition(ltos, ltos); - __ lneg(rdx, rax); -} - - -void TemplateTable::fneg() { - transition(ftos, ftos); - __ fchs(); -} - - -void TemplateTable::dneg() { - transition(dtos, dtos); - __ fchs(); -} - - -void TemplateTable::iinc() { - transition(vtos, vtos); - __ load_signed_byte(rdx, at_bcp(2)); // get constant - locals_index(rbx); - __ addl(iaddress(rbx), rdx); -} - - -void TemplateTable::wide_iinc() { - transition(vtos, vtos); - __ movl(rdx, at_bcp(4)); // get constant - locals_index_wide(rbx); - __ bswapl(rdx); // swap bytes & sign-extend constant - __ sarl(rdx, 16); - __ addl(iaddress(rbx), rdx); - // Note: should probably use only one movl to get both - // the index and the constant -> fix this -} - - -void TemplateTable::convert() { - // Checking -#ifdef ASSERT - { TosState tos_in = ilgl; - TosState tos_out = ilgl; - switch (bytecode()) { - case Bytecodes::_i2l: // fall through - case Bytecodes::_i2f: // fall through - case Bytecodes::_i2d: // fall through - case Bytecodes::_i2b: // fall through - case Bytecodes::_i2c: // fall through - case Bytecodes::_i2s: tos_in = itos; break; - case Bytecodes::_l2i: // fall through - case Bytecodes::_l2f: // fall through - case Bytecodes::_l2d: tos_in = ltos; break; - case Bytecodes::_f2i: // fall through - case Bytecodes::_f2l: // fall through - case Bytecodes::_f2d: tos_in = ftos; break; - case Bytecodes::_d2i: // fall through - case Bytecodes::_d2l: // fall through - case Bytecodes::_d2f: tos_in = dtos; break; - default : ShouldNotReachHere(); - } - switch (bytecode()) { - case Bytecodes::_l2i: // fall through - case Bytecodes::_f2i: // fall through - case Bytecodes::_d2i: // fall through - case Bytecodes::_i2b: // fall through - case Bytecodes::_i2c: // fall through - case Bytecodes::_i2s: tos_out = itos; break; - case Bytecodes::_i2l: // fall through - case Bytecodes::_f2l: // fall through - case Bytecodes::_d2l: tos_out = ltos; break; - case Bytecodes::_i2f: // fall through - case Bytecodes::_l2f: // fall through - case Bytecodes::_d2f: tos_out = ftos; break; - case Bytecodes::_i2d: // fall through - case Bytecodes::_l2d: // fall through - case Bytecodes::_f2d: tos_out = dtos; break; - default : ShouldNotReachHere(); - } - transition(tos_in, tos_out); - } -#endif // ASSERT - - // Conversion - // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation) - switch (bytecode()) { - case Bytecodes::_i2l: - __ extend_sign(rdx, rax); - break; - case Bytecodes::_i2f: - __ push(rax); // store int on tos - __ fild_s(at_rsp()); // load int to ST0 - __ f2ieee(); // truncate to float size - __ pop(rcx); // adjust rsp - break; - case Bytecodes::_i2d: - __ push(rax); // add one slot for d2ieee() - __ push(rax); // store int on tos - __ fild_s(at_rsp()); // load int to ST0 - __ d2ieee(); // truncate to double size - __ pop(rcx); // adjust rsp - __ pop(rcx); - break; - case Bytecodes::_i2b: - __ shll(rax, 24); // truncate upper 24 bits - __ sarl(rax, 24); // and sign-extend byte - LP64_ONLY(__ movsbl(rax, rax)); - break; - case Bytecodes::_i2c: - __ andl(rax, 0xFFFF); // truncate upper 16 bits - LP64_ONLY(__ movzwl(rax, rax)); - break; - case Bytecodes::_i2s: - __ shll(rax, 16); // truncate upper 16 bits - __ sarl(rax, 16); // and sign-extend short - LP64_ONLY(__ movswl(rax, rax)); - break; - case Bytecodes::_l2i: - /* nothing to do */ - break; - case Bytecodes::_l2f: - __ push(rdx); // store long on 
tos - __ push(rax); - __ fild_d(at_rsp()); // load long to ST0 - __ f2ieee(); // truncate to float size - __ pop(rcx); // adjust rsp - __ pop(rcx); - break; - case Bytecodes::_l2d: - __ push(rdx); // store long on tos - __ push(rax); - __ fild_d(at_rsp()); // load long to ST0 - __ d2ieee(); // truncate to double size - __ pop(rcx); // adjust rsp - __ pop(rcx); - break; - case Bytecodes::_f2i: - __ push(rcx); // reserve space for argument - __ fstp_s(at_rsp()); // pass float argument on stack - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); - break; - case Bytecodes::_f2l: - __ push(rcx); // reserve space for argument - __ fstp_s(at_rsp()); // pass float argument on stack - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); - break; - case Bytecodes::_f2d: - /* nothing to do */ - break; - case Bytecodes::_d2i: - __ push(rcx); // reserve space for argument - __ push(rcx); - __ fstp_d(at_rsp()); // pass double argument on stack - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2); - break; - case Bytecodes::_d2l: - __ push(rcx); // reserve space for argument - __ push(rcx); - __ fstp_d(at_rsp()); // pass double argument on stack - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2); - break; - case Bytecodes::_d2f: - __ push(rcx); // reserve space for f2ieee() - __ f2ieee(); // truncate to float size - __ pop(rcx); // adjust rsp - break; - default : - ShouldNotReachHere(); - } -} - - -void TemplateTable::lcmp() { - transition(ltos, itos); - // y = rdx:rax - __ pop_l(rbx, rcx); // get x = rcx:rbx - __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y) - __ mov(rax, rcx); -} - - -void TemplateTable::float_cmp(bool is_float, int unordered_result) { - if (is_float) { - __ fld_s(at_rsp()); - } else { - __ fld_d(at_rsp()); - __ pop(rdx); - } - __ pop(rcx); - __ fcmp2int(rax, unordered_result < 0); -} - - -void TemplateTable::branch(bool is_jsr, bool is_wide) { - __ get_method(rcx); // ECX holds method - __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count - - const ByteSize be_offset = MethodCounters::backedge_counter_offset() + - InvocationCounter::counter_offset(); - const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + - InvocationCounter::counter_offset(); - - // Load up EDX with the branch displacement - if (is_wide) { - __ movl(rdx, at_bcp(1)); - } else { - __ load_signed_short(rdx, at_bcp(1)); - } - __ bswapl(rdx); - if (!is_wide) __ sarl(rdx, 16); - LP64_ONLY(__ movslq(rdx, rdx)); - - - // Handle all the JSR stuff here, then exit. - // It's much shorter and cleaner than intermingling with the - // non-JSR normal-branch stuff occurring below. - if (is_jsr) { - // Pre-load the next target bytecode into EBX - __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0)); - - // compute return address as bci in rax, - __ lea(rax, at_bcp((is_wide ? 
5 : 3) - in_bytes(ConstMethod::codes_offset()))); - __ subptr(rax, Address(rcx, Method::const_offset())); - // Adjust the bcp in RSI by the displacement in EDX - __ addptr(rsi, rdx); - // Push return address - __ push_i(rax); - // jsr returns vtos - __ dispatch_only_noverify(vtos); - return; - } - - // Normal (non-jsr) branch handling - - // Adjust the bcp in RSI by the displacement in EDX - __ addptr(rsi, rdx); - - assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); - Label backedge_counter_overflow; - Label profile_method; - Label dispatch; - if (UseLoopCounter) { - // increment backedge counter for backward branches - // rax,: MDO - // rbx,: MDO bumped taken-count - // rcx: method - // rdx: target offset - // rsi: target bcp - // rdi: locals pointer - __ testl(rdx, rdx); // check if forward or backward branch - __ jcc(Assembler::positive, dispatch); // count only if backward branch - - // check if MethodCounters exists - Label has_counters; - __ movptr(rax, Address(rcx, Method::method_counters_offset())); - __ testptr(rax, rax); - __ jcc(Assembler::notZero, has_counters); - __ push(rdx); - __ push(rcx); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), - rcx); - __ pop(rcx); - __ pop(rdx); - __ movptr(rax, Address(rcx, Method::method_counters_offset())); - __ testptr(rax, rax); - __ jcc(Assembler::zero, dispatch); - __ bind(has_counters); - - if (TieredCompilation) { - Label no_mdo; - int increment = InvocationCounter::count_increment; - if (ProfileInterpreter) { - // Are we profiling? - __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset()))); - __ testptr(rbx, rbx); - __ jccb(Assembler::zero, no_mdo); - // Increment the MDO backedge counter - const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset())); - __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, - rax, false, Assembler::zero, &backedge_counter_overflow); - __ jmp(dispatch); - } - __ bind(no_mdo); - // Increment backedge counter in MethodCounters* - __ movptr(rcx, Address(rcx, Method::method_counters_offset())); - const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset())); - __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, - rax, false, Assembler::zero, &backedge_counter_overflow); - } else { // not TieredCompilation - // increment counter - __ movptr(rcx, Address(rcx, Method::method_counters_offset())); - __ movl(rax, Address(rcx, be_offset)); // load backedge counter - __ incrementl(rax, InvocationCounter::count_increment); // increment counter - __ movl(Address(rcx, be_offset), rax); // store counter - - __ movl(rax, Address(rcx, inv_offset)); // load invocation counter - - __ andl(rax, InvocationCounter::count_mask_value); // and the status bits - __ addl(rax, Address(rcx, be_offset)); // add both counters - - if (ProfileInterpreter) { - // Test to see if we should create a method data oop - __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset()))); - __ jcc(Assembler::less, dispatch); - - // if no method data exists, go to profile method - __ test_method_data_pointer(rax, profile_method); - - if (UseOnStackReplacement) { - // check for overflow against rbx, which is the MDO taken count - __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()))); - __ 
jcc(Assembler::below, dispatch); - - // When ProfileInterpreter is on, the backedge_count comes from the - // MethodData*, which value does not get reset on the call to - // frequency_counter_overflow(). To avoid excessive calls to the overflow - // routine while the method is being compiled, add a second test to make - // sure the overflow function is called only once every overflow_frequency. - const int overflow_frequency = 1024; - __ andptr(rbx, overflow_frequency-1); - __ jcc(Assembler::zero, backedge_counter_overflow); - } - } else { - if (UseOnStackReplacement) { - // check for overflow against rax, which is the sum of the counters - __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()))); - __ jcc(Assembler::aboveEqual, backedge_counter_overflow); - - } - } - } - __ bind(dispatch); - } - - // Pre-load the next target bytecode into EBX - __ load_unsigned_byte(rbx, Address(rsi, 0)); - - // continue with the bytecode @ target - // rax,: return bci for jsr's, unused otherwise - // rbx,: target bytecode - // rsi: target bcp - __ dispatch_only(vtos); - - if (UseLoopCounter) { - if (ProfileInterpreter) { - // Out-of-line code to allocate method data oop. - __ bind(profile_method); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); - __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode - __ set_method_data_pointer_for_bcp(); - __ jmp(dispatch); - } - - if (UseOnStackReplacement) { - - // invocation counter overflow - __ bind(backedge_counter_overflow); - __ negptr(rdx); - __ addptr(rdx, rsi); // branch bcp - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx); - __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode - - // rax,: osr nmethod (osr ok) or NULL (osr not possible) - // rbx,: target bytecode - // rdx: scratch - // rdi: locals pointer - // rsi: bcp - __ testptr(rax, rax); // test result - __ jcc(Assembler::zero, dispatch); // no osr if null - // nmethod may have been invalidated (VM may block upon call_VM return) - __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use); - __ jcc(Assembler::notEqual, dispatch); - - // We have the address of an on stack replacement routine in rax, - // We need to prepare to execute the OSR method. First we must - // migrate the locals and monitors off of the stack. 
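Stepping back to the overflow_frequency test above: it is a throttle rather than a threshold. With ProfileInterpreter on, the MDO taken-count is not reset by frequency_counter_overflow(), so the expensive overflow call is taken only when the low bits of that count are zero. A sketch of the equivalent check (assumed standalone form of the andptr/jcc pair, not HotSpot code):

    // overflow_frequency must stay a power of two for the mask trick to work.
    const int overflow_frequency = 1024;

    bool take_overflow_path(int mdo_taken_count) {
      // mirrors: andptr(rbx, overflow_frequency - 1); jcc(zero, backedge_counter_overflow)
      return (mdo_taken_count & (overflow_frequency - 1)) == 0;
    }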
- - __ mov(rbx, rax); // save the nmethod - - __ get_thread(rcx); - call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); - // rax, is OSR buffer, move it to expected parameter location - __ mov(rcx, rax); - - // pop the interpreter frame - __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp - __ leave(); // remove frame anchor - __ pop(rdi); // get return address - __ mov(rsp, rdx); // set sp to sender sp - - // Align stack pointer for compiled code (note that caller is - // responsible for undoing this fixup by remembering the old SP - // in an rbp,-relative location) - __ andptr(rsp, -(StackAlignmentInBytes)); - - // push the (possibly adjusted) return address - __ push(rdi); - - // and begin the OSR nmethod - __ jmp(Address(rbx, nmethod::osr_entry_point_offset())); - } - } -} - - -void TemplateTable::if_0cmp(Condition cc) { - transition(itos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ testl(rax, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - - -void TemplateTable::if_icmp(Condition cc) { - transition(itos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ pop_i(rdx); - __ cmpl(rdx, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - - -void TemplateTable::if_nullcmp(Condition cc) { - transition(atos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ testptr(rax, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - - -void TemplateTable::if_acmp(Condition cc) { - transition(atos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ pop_ptr(rdx); - __ cmpptr(rdx, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - - -void TemplateTable::ret() { - transition(vtos, vtos); - locals_index(rbx); - __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp - __ profile_ret(rbx, rcx); - __ get_method(rax); - __ movptr(rsi, Address(rax, Method::const_offset())); - __ lea(rsi, Address(rsi, rbx, Address::times_1, - ConstMethod::codes_offset())); - __ dispatch_next(vtos); -} - - -void TemplateTable::wide_ret() { - transition(vtos, vtos); - locals_index_wide(rbx); - __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp - __ profile_ret(rbx, rcx); - __ get_method(rax); - __ movptr(rsi, Address(rax, Method::const_offset())); - __ lea(rsi, Address(rsi, rbx, Address::times_1, ConstMethod::codes_offset())); - __ dispatch_next(vtos); -} - - -void TemplateTable::tableswitch() { - Label default_case, continue_execution; - transition(itos, vtos); - // align rsi - __ lea(rbx, at_bcp(wordSize)); - __ andptr(rbx, -wordSize); - // load lo & hi - __ movl(rcx, Address(rbx, 1 * wordSize)); - __ movl(rdx, Address(rbx, 2 * wordSize)); - __ bswapl(rcx); - __ bswapl(rdx); - // check against lo & hi - __ cmpl(rax, rcx); - __ jccb(Assembler::less, default_case); - __ cmpl(rax, rdx); - __ jccb(Assembler::greater, default_case); - // lookup dispatch offset - __ subl(rax, rcx); - __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); - __ profile_switch_case(rax, rbx, rcx); - // continue 
execution - __ bind(continue_execution); - __ bswapl(rdx); - __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1)); - __ addptr(rsi, rdx); - __ dispatch_only(vtos); - // handle default - __ bind(default_case); - __ profile_switch_default(rax); - __ movl(rdx, Address(rbx, 0)); - __ jmp(continue_execution); -} - - -void TemplateTable::lookupswitch() { - transition(itos, itos); - __ stop("lookupswitch bytecode should have been rewritten"); -} - - -void TemplateTable::fast_linearswitch() { - transition(itos, vtos); - Label loop_entry, loop, found, continue_execution; - // bswapl rax, so we can avoid bswapping the table entries - __ bswapl(rax); - // align rsi - __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below) - __ andptr(rbx, -wordSize); - // set counter - __ movl(rcx, Address(rbx, wordSize)); - __ bswapl(rcx); - __ jmpb(loop_entry); - // table search - __ bind(loop); - __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize)); - __ jccb(Assembler::equal, found); - __ bind(loop_entry); - __ decrementl(rcx); - __ jcc(Assembler::greaterEqual, loop); - // default case - __ profile_switch_default(rax); - __ movl(rdx, Address(rbx, 0)); - __ jmpb(continue_execution); - // entry found -> get offset - __ bind(found); - __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize)); - __ profile_switch_case(rcx, rax, rbx); - // continue execution - __ bind(continue_execution); - __ bswapl(rdx); - __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1)); - __ addptr(rsi, rdx); - __ dispatch_only(vtos); -} - - -void TemplateTable::fast_binaryswitch() { - transition(itos, vtos); - // Implementation using the following core algorithm: - // - // int binary_search(int key, LookupswitchPair* array, int n) { - // // Binary search according to "Methodik des Programmierens" by - // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. - // int i = 0; - // int j = n; - // while (i+1 < j) { - // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) - // // with Q: for all i: 0 <= i < n: key < a[i] - // // where a stands for the array and assuming that the (inexisting) - // // element a[n] is infinitely big. 
- // int h = (i + j) >> 1; - // // i < h < j - // if (key < array[h].fast_match()) { - // j = h; - // } else { - // i = h; - // } - // } - // // R: a[i] <= key < a[i+1] or Q - // // (i.e., if key is within array, i is the correct index) - // return i; - // } - - // register allocation - const Register key = rax; // already set (tosca) - const Register array = rbx; - const Register i = rcx; - const Register j = rdx; - const Register h = rdi; // needs to be restored - const Register temp = rsi; - // setup array - __ save_bcp(); - - __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below) - __ andptr(array, -wordSize); - // initialize i & j - __ xorl(i, i); // i = 0; - __ movl(j, Address(array, -wordSize)); // j = length(array); - // Convert j into native byteordering - __ bswapl(j); - // and start - Label entry; - __ jmp(entry); - - // binary search loop - { Label loop; - __ bind(loop); - // int h = (i + j) >> 1; - __ leal(h, Address(i, j, Address::times_1)); // h = i + j; - __ sarl(h, 1); // h = (i + j) >> 1; - // if (key < array[h].fast_match()) { - // j = h; - // } else { - // i = h; - // } - // Convert array[h].match to native byte-ordering before compare - __ movl(temp, Address(array, h, Address::times_8, 0*wordSize)); - __ bswapl(temp); - __ cmpl(key, temp); - // j = h if (key < array[h].fast_match()) - __ cmov32(Assembler::less , j, h); - // i = h if (key >= array[h].fast_match()) - __ cmov32(Assembler::greaterEqual, i, h); - // while (i+1 < j) - __ bind(entry); - __ leal(h, Address(i, 1)); // i+1 - __ cmpl(h, j); // i+1 < j - __ jcc(Assembler::less, loop); - } - - // end of binary search, result index is i (must check again!) - Label default_case; - // Convert array[i].match to native byte-ordering before compare - __ movl(temp, Address(array, i, Address::times_8, 0*wordSize)); - __ bswapl(temp); - __ cmpl(key, temp); - __ jcc(Assembler::notEqual, default_case); - - // entry found -> j = offset - __ movl(j , Address(array, i, Address::times_8, 1*wordSize)); - __ profile_switch_case(i, key, array); - __ bswapl(j); - LP64_ONLY(__ movslq(j, j)); - __ restore_bcp(); - __ restore_locals(); // restore rdi - __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1)); - - __ addptr(rsi, j); - __ dispatch_only(vtos); - - // default case -> j = default offset - __ bind(default_case); - __ profile_switch_default(i); - __ movl(j, Address(array, -2*wordSize)); - __ bswapl(j); - LP64_ONLY(__ movslq(j, j)); - __ restore_bcp(); - __ restore_locals(); // restore rdi - __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1)); - __ addptr(rsi, j); - __ dispatch_only(vtos); -} - - -void TemplateTable::_return(TosState state) { - transition(state, state); - assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation - - if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { - assert(state == vtos, "only valid state"); - __ movptr(rax, aaddress(0)); - __ load_klass(rdi, rax); - __ movl(rdi, Address(rdi, Klass::access_flags_offset())); - __ testl(rdi, JVM_ACC_HAS_FINALIZER); - Label skip_register_finalizer; - __ jcc(Assembler::zero, skip_register_finalizer); - - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax); - - __ bind(skip_register_finalizer); - } - - __ remove_activation(state, rsi); - __ jmp(rsi); -} - - -// ---------------------------------------------------------------------------- -// Volatile variables demand their effects be made known to all CPU's 
in -// order. Store buffers on most chips allow reads & writes to reorder; the -// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of -// memory barrier (i.e., it's not sufficient that the interpreter does not -// reorder volatile references, the hardware also must not reorder them). -// -// According to the new Java Memory Model (JMM): -// (1) All volatiles are serialized wrt each other. -// ALSO reads & writes act as acquire & release, so: -// (2) A read cannot let unrelated NON-volatile memory refs that happen after -// the read float up to before the read. It's OK for non-volatile memory refs -// that happen before the volatile read to float down below it. -// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs -// that happen BEFORE the write float down to after the write. It's OK for -// non-volatile memory refs that happen after the volatile write to float up -// before it. -// -// We only put in barriers around volatile refs (they are expensive), not -// _between_ memory refs (that would require us to track the flavor of the -// previous memory refs). Requirements (2) and (3) require some barriers -// before volatile stores and after volatile loads. These nearly cover -// requirement (1) but miss the volatile-store-volatile-load case. This final -// case is placed after volatile-stores although it could just as well go -// before volatile-loads. -void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) { - // Helper function to insert an is-volatile test and memory barrier - if( !os::is_MP() ) return; // Not needed on single CPU - __ membar(order_constraint); -} - -void TemplateTable::resolve_cache_and_index(int byte_no, - Register Rcache, - Register index, - size_t index_size) { - const Register temp = rbx; - assert_different_registers(Rcache, index, temp); - - Label resolved; - assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); - __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size); - __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
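Read in acquire/release terms, the barrier rules above say: a volatile load acts as an acquire, a volatile store as a release, and one extra full fence after the store covers the volatile-store/volatile-load case. A sketch of that mapping using C++ atomics purely as an analogy (the interpreter itself emits membar() with the Membar_mask_bits shown in the volatile store paths further down):

    #include <atomic>

    std::atomic<int> field;   // stand-in for a volatile Java field

    int  volatile_get()      { return field.load(std::memory_order_acquire); }
    void volatile_put(int v) {
      field.store(v, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad barrier
    }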
- __ jcc(Assembler::equal, resolved); - - // resolve first time through - address entry; - switch (bytecode()) { - case Bytecodes::_getstatic : // fall through - case Bytecodes::_putstatic : // fall through - case Bytecodes::_getfield : // fall through - case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; - case Bytecodes::_invokevirtual : // fall through - case Bytecodes::_invokespecial : // fall through - case Bytecodes::_invokestatic : // fall through - case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; - case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; - case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; - default: - fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); - break; - } - __ movl(temp, (int)bytecode()); - __ call_VM(noreg, entry, temp); - // Update registers with resolved info - __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); - __ bind(resolved); -} - - -// The cache and index registers must be set before call -void TemplateTable::load_field_cp_cache_entry(Register obj, - Register cache, - Register index, - Register off, - Register flags, - bool is_static = false) { - assert_different_registers(cache, index, flags, off); - - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - // Field offset - __ movptr(off, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()))); - // Flags - __ movl(flags, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()))); - - // klass overwrite register - if (is_static) { - __ movptr(obj, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset()))); - const int mirror_offset = in_bytes(Klass::java_mirror_offset()); - __ movptr(obj, Address(obj, mirror_offset)); - } -} - -void TemplateTable::load_invoke_cp_cache_entry(int byte_no, - Register method, - Register itable_index, - Register flags, - bool is_invokevirtual, - bool is_invokevfinal, /*unused*/ - bool is_invokedynamic) { - // setup registers - const Register cache = rcx; - const Register index = rdx; - assert_different_registers(method, flags); - assert_different_registers(method, cache, index); - assert_different_registers(itable_index, flags); - assert_different_registers(itable_index, cache, index); - // determine constant pool cache field offsets - assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); - const int method_offset = in_bytes( - ConstantPoolCache::base_offset() + - ((byte_no == f2_byte) - ? ConstantPoolCacheEntry::f2_offset() - : ConstantPoolCacheEntry::f1_offset())); - const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()); - // access constant pool cache fields - const int index_offset = in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()); - - size_t index_size = (is_invokedynamic ? 
sizeof(u4) : sizeof(u2)); - resolve_cache_and_index(byte_no, cache, index, index_size); - __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); - - if (itable_index != noreg) { - __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); - } - __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); -} - - -// The registers cache and index expected to be set before call. -// Correct values of the cache and index registers are preserved. -void TemplateTable::jvmti_post_field_access(Register cache, - Register index, - bool is_static, - bool has_tos) { - if (JvmtiExport::can_post_field_access()) { - // Check to see if a field access watch has been set before we take - // the time to call into the VM. - Label L1; - assert_different_registers(cache, index, rax); - __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); - __ testl(rax,rax); - __ jcc(Assembler::zero, L1); - - // cache entry pointer - __ addptr(cache, in_bytes(ConstantPoolCache::base_offset())); - __ shll(index, LogBytesPerWord); - __ addptr(cache, index); - if (is_static) { - __ xorptr(rax, rax); // NULL object reference - } else { - __ pop(atos); // Get the object - __ verify_oop(rax); - __ push(atos); // Restore stack state - } - // rax,: object pointer or NULL - // cache: cache entry pointer - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), - rax, cache); - __ get_cache_and_index_at_bcp(cache, index, 1); - __ bind(L1); - } -} - -void TemplateTable::pop_and_check_object(Register r) { - __ pop_ptr(r); - __ null_check(r); // for field access must check obj. - __ verify_oop(r); -} - -void TemplateTable::getfield_or_static(int byte_no, bool is_static) { - transition(vtos, vtos); - - const Register cache = rcx; - const Register index = rdx; - const Register obj = rcx; - const Register off = rbx; - const Register flags = rax; - - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); - jvmti_post_field_access(cache, index, is_static, false); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); - - if (!is_static) pop_and_check_object(obj); - - const Address lo(obj, off, Address::times_1, 0*wordSize); - const Address hi(obj, off, Address::times_1, 1*wordSize); - - Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - assert(btos == 0, "change code, btos != 0"); - // btos - __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - - __ load_signed_byte(rax, lo ); - __ push(btos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx); - } - __ jmp(Done); - - __ bind(notByte); - // itos - __ cmpl(flags, itos ); - __ jcc(Assembler::notEqual, notInt); - - __ movl(rax, lo ); - __ push(itos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx); - } - __ jmp(Done); - - __ bind(notInt); - // atos - __ cmpl(flags, atos ); - __ jcc(Assembler::notEqual, notObj); - - __ movl(rax, lo ); - __ push(atos); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx); - } - __ jmp(Done); - - __ bind(notObj); - // ctos - __ cmpl(flags, ctos ); - __ jcc(Assembler::notEqual, notChar); - - __ load_unsigned_short(rax, lo ); - __ push(ctos); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx); - } - __ jmp(Done); - - __ 
bind(notChar); - // stos - __ cmpl(flags, stos ); - __ jcc(Assembler::notEqual, notShort); - - __ load_signed_short(rax, lo ); - __ push(stos); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx); - } - __ jmp(Done); - - __ bind(notShort); - // ltos - __ cmpl(flags, ltos ); - __ jcc(Assembler::notEqual, notLong); - - // Generate code as if volatile. There just aren't enough registers to - // save that information and this code is faster than the test. - __ fild_d(lo); // Must load atomically - __ subptr(rsp,2*wordSize); // Make space for store - __ fistp_d(Address(rsp,0)); - __ pop(rax); - __ pop(rdx); - - __ push(ltos); - // Don't rewrite to _fast_lgetfield for potential volatile case. - __ jmp(Done); - - __ bind(notLong); - // ftos - __ cmpl(flags, ftos ); - __ jcc(Assembler::notEqual, notFloat); - - __ fld_s(lo); - __ push(ftos); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx); - } - __ jmp(Done); - - __ bind(notFloat); - // dtos - __ cmpl(flags, dtos ); - __ jcc(Assembler::notEqual, notDouble); - - __ fld_d(lo); - __ push(dtos); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx); - } - __ jmpb(Done); - - __ bind(notDouble); - - __ stop("Bad state"); - - __ bind(Done); - // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO). - // volatile_barrier( ); -} - - -void TemplateTable::getfield(int byte_no) { - getfield_or_static(byte_no, false); -} - - -void TemplateTable::getstatic(int byte_no) { - getfield_or_static(byte_no, true); -} - -// The registers cache and index expected to be set before call. -// The function may destroy various registers, just not the cache and index registers. -void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { - - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - - if (JvmtiExport::can_post_field_modification()) { - // Check to see if a field modification watch has been set before we take - // the time to call into the VM. - Label L1; - assert_different_registers(cache, index, rax); - __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); - __ testl(rax, rax); - __ jcc(Assembler::zero, L1); - - // The cache and index registers have been already set. - // This allows to eliminate this call but the cache and index - // registers have to be correspondingly used after this line. - __ get_cache_and_index_at_bcp(rax, rdx, 1); - - if (is_static) { - // Life is simple. Null out the object pointer. - __ xorptr(rbx, rbx); - } else { - // Life is harder. The stack holds the value on top, followed by the object. - // We don't know the size of the value, though; it could be one or two words - // depending on its type. As a result, we must find the type to determine where - // the object is. 
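A standalone paraphrase of the decision the code below makes (illustrative helper, not HotSpot code; the real test compares the cp-cache tos state against ltos and dtos and then adds Interpreter::expr_offset_in_bytes(1) or (2)):

    #include <cstddef>

    // Only long and double occupy two expression-stack slots, so the object
    // reference sits one or two slots above the topmost value.
    unsigned char* object_address(unsigned char* rsp, bool is_long_or_double,
                                  size_t slot_size_in_bytes) {
      size_t value_slots = is_long_or_double ? 2 : 1;
      return rsp + value_slots * slot_size_in_bytes;
    }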
- Label two_word, valsize_known; - __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - __ mov(rbx, rsp); - __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift); - // Make sure we don't need to mask rcx after the above shift - ConstantPoolCacheEntry::verify_tos_state_shift(); - __ cmpl(rcx, ltos); - __ jccb(Assembler::equal, two_word); - __ cmpl(rcx, dtos); - __ jccb(Assembler::equal, two_word); - __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos) - __ jmpb(valsize_known); - - __ bind(two_word); - __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue - - __ bind(valsize_known); - // setup object pointer - __ movptr(rbx, Address(rbx, 0)); - } - // cache entry pointer - __ addptr(rax, in_bytes(cp_base_offset)); - __ shll(rdx, LogBytesPerWord); - __ addptr(rax, rdx); - // object (tos) - __ mov(rcx, rsp); - // rbx,: object pointer set up above (NULL if static) - // rax,: cache entry pointer - // rcx: jvalue object on the stack - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), - rbx, rax, rcx); - __ get_cache_and_index_at_bcp(cache, index, 1); - __ bind(L1); - } -} - - -void TemplateTable::putfield_or_static(int byte_no, bool is_static) { - transition(vtos, vtos); - - const Register cache = rcx; - const Register index = rdx; - const Register obj = rcx; - const Register off = rbx; - const Register flags = rax; - - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); - jvmti_post_field_mod(cache, index, is_static); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); - - // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO). - // volatile_barrier( ); - - Label notVolatile, Done; - __ movl(rdx, flags); - __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - - // field addresses - const Address lo(obj, off, Address::times_1, 0*wordSize); - const Address hi(obj, off, Address::times_1, 1*wordSize); - - Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - assert(btos == 0, "change code, btos != 0"); - __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - - // btos - { - __ pop(btos); - if (!is_static) pop_and_check_object(obj); - __ movb(lo, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notByte); - __ cmpl(flags, itos); - __ jcc(Assembler::notEqual, notInt); - - // itos - { - __ pop(itos); - if (!is_static) pop_and_check_object(obj); - __ movl(lo, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notInt); - __ cmpl(flags, atos); - __ jcc(Assembler::notEqual, notObj); - - // atos - { - __ pop(atos); - if (!is_static) pop_and_check_object(obj); - do_oop_store(_masm, lo, rax, _bs->kind(), false); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notObj); - __ cmpl(flags, ctos); - __ jcc(Assembler::notEqual, notChar); - - // ctos - { - __ pop(ctos); - if (!is_static) pop_and_check_object(obj); - __ movw(lo, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notChar); - __ cmpl(flags, stos); - __ 
jcc(Assembler::notEqual, notShort); - - // stos - { - __ pop(stos); - if (!is_static) pop_and_check_object(obj); - __ movw(lo, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notShort); - __ cmpl(flags, ltos); - __ jcc(Assembler::notEqual, notLong); - - // ltos - { - Label notVolatileLong; - __ testl(rdx, rdx); - __ jcc(Assembler::zero, notVolatileLong); - - __ pop(ltos); // overwrites rdx, do this after testing volatile. - if (!is_static) pop_and_check_object(obj); - - // Replace with real volatile test - __ push(rdx); - __ push(rax); // Must update atomically with FIST - __ fild_d(Address(rsp,0)); // So load into FPU register - __ fistp_d(lo); // and put into memory atomically - __ addptr(rsp, 2*wordSize); - // volatile_barrier(); - volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | - Assembler::StoreStore)); - // Don't rewrite volatile version - __ jmp(notVolatile); - - __ bind(notVolatileLong); - - __ pop(ltos); // overwrites rdx - if (!is_static) pop_and_check_object(obj); - NOT_LP64(__ movptr(hi, rdx)); - __ movptr(lo, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no); - } - __ jmp(notVolatile); - } - - __ bind(notLong); - __ cmpl(flags, ftos); - __ jcc(Assembler::notEqual, notFloat); - - // ftos - { - __ pop(ftos); - if (!is_static) pop_and_check_object(obj); - __ fstp_s(lo); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notFloat); -#ifdef ASSERT - __ cmpl(flags, dtos); - __ jcc(Assembler::notEqual, notDouble); -#endif - - // dtos - { - __ pop(dtos); - if (!is_static) pop_and_check_object(obj); - __ fstp_d(lo); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no); - } - __ jmp(Done); - } - -#ifdef ASSERT - __ bind(notDouble); - __ stop("Bad state"); -#endif - - __ bind(Done); - - // Check for volatile store - __ testl(rdx, rdx); - __ jcc(Assembler::zero, notVolatile); - volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | - Assembler::StoreStore)); - __ bind(notVolatile); -} - - -void TemplateTable::putfield(int byte_no) { - putfield_or_static(byte_no, false); -} - - -void TemplateTable::putstatic(int byte_no) { - putfield_or_static(byte_no, true); -} - -void TemplateTable::jvmti_post_fast_field_mod() { - if (JvmtiExport::can_post_field_modification()) { - // Check to see if a field modification watch has been set before we take - // the time to call into the VM. - Label L2; - __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); - __ testl(rcx,rcx); - __ jcc(Assembler::zero, L2); - __ pop_ptr(rbx); // copy the object pointer from tos - __ verify_oop(rbx); - __ push_ptr(rbx); // put the object pointer back on tos - - // Save tos values before call_VM() clobbers them. Since we have - // to do it for every data type, we use the saved values as the - // jvalue object. 
- switch (bytecode()) { // load values into the jvalue object - case Bytecodes::_fast_aputfield: __ push_ptr(rax); break; - case Bytecodes::_fast_bputfield: // fall through - case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: // fall through - case Bytecodes::_fast_iputfield: __ push_i(rax); break; - case Bytecodes::_fast_dputfield: __ push_d(); break; - case Bytecodes::_fast_fputfield: __ push_f(); break; - case Bytecodes::_fast_lputfield: __ push_l(rax); break; - - default: - ShouldNotReachHere(); - } - __ mov(rcx, rsp); // points to jvalue on the stack - // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(rax, rdx, 1); - __ verify_oop(rbx); - // rbx,: object pointer copied above - // rax,: cache entry pointer - // rcx: jvalue object on the stack - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx); - - switch (bytecode()) { // restore tos values - case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break; - case Bytecodes::_fast_bputfield: // fall through - case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: // fall through - case Bytecodes::_fast_iputfield: __ pop_i(rax); break; - case Bytecodes::_fast_dputfield: __ pop_d(); break; - case Bytecodes::_fast_fputfield: __ pop_f(); break; - case Bytecodes::_fast_lputfield: __ pop_l(rax); break; - } - __ bind(L2); - } -} - -void TemplateTable::fast_storefield(TosState state) { - transition(state, vtos); - - ByteSize base = ConstantPoolCache::base_offset(); - - jvmti_post_fast_field_mod(); - - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rbx, 1); - - // test for volatile with rdx but rdx is tos register for lputfield. - if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx); - __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + - ConstantPoolCacheEntry::flags_offset()))); - - // replace index with field offset from cache entry - __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); - - // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO). - // volatile_barrier( ); - - Label notVolatile, Done; - __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - // Check for volatile store - __ testl(rdx, rdx); - __ jcc(Assembler::zero, notVolatile); - - if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); - - // Get object from stack - pop_and_check_object(rcx); - - // field addresses - const Address lo(rcx, rbx, Address::times_1, 0*wordSize); - const Address hi(rcx, rbx, Address::times_1, 1*wordSize); - - // access field - switch (bytecode()) { - case Bytecodes::_fast_bputfield: __ movb(lo, rax); break; - case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: __ movw(lo, rax); break; - case Bytecodes::_fast_iputfield: __ movl(lo, rax); break; - case Bytecodes::_fast_lputfield: - NOT_LP64(__ movptr(hi, rdx)); - __ movptr(lo, rax); - break; - case Bytecodes::_fast_fputfield: __ fstp_s(lo); break; - case Bytecodes::_fast_dputfield: __ fstp_d(lo); break; - case Bytecodes::_fast_aputfield: { - do_oop_store(_masm, lo, rax, _bs->kind(), false); - break; - } - default: - ShouldNotReachHere(); - } - - Label done; - volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | - Assembler::StoreStore)); - // Barriers are so large that short branch doesn't reach! 
- __ jmp(done); - - // Same code as above, but don't need rdx to test for volatile. - __ bind(notVolatile); - - if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); - - // Get object from stack - pop_and_check_object(rcx); - - // access field - switch (bytecode()) { - case Bytecodes::_fast_bputfield: __ movb(lo, rax); break; - case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: __ movw(lo, rax); break; - case Bytecodes::_fast_iputfield: __ movl(lo, rax); break; - case Bytecodes::_fast_lputfield: - NOT_LP64(__ movptr(hi, rdx)); - __ movptr(lo, rax); - break; - case Bytecodes::_fast_fputfield: __ fstp_s(lo); break; - case Bytecodes::_fast_dputfield: __ fstp_d(lo); break; - case Bytecodes::_fast_aputfield: { - do_oop_store(_masm, lo, rax, _bs->kind(), false); - break; - } - default: - ShouldNotReachHere(); - } - __ bind(done); -} - - -void TemplateTable::fast_accessfield(TosState state) { - transition(atos, state); - - // do the JVMTI work here to avoid disturbing the register state below - if (JvmtiExport::can_post_field_access()) { - // Check to see if a field access watch has been set before we take - // the time to call into the VM. - Label L1; - __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); - __ testl(rcx,rcx); - __ jcc(Assembler::zero, L1); - // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1); - __ push_ptr(rax); // save object pointer before call_VM() clobbers it - __ verify_oop(rax); - // rax,: object pointer copied above - // rcx: cache entry pointer - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx); - __ pop_ptr(rax); // restore object pointer - __ bind(L1); - } - - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rbx, 1); - // replace index with field offset from cache entry - __ movptr(rbx, Address(rcx, - rbx, - Address::times_ptr, - in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()))); - - - // rax,: object - __ verify_oop(rax); - __ null_check(rax); - // field addresses - const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize); - const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize); - - // access field - switch (bytecode()) { - case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break; - case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break; - case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break; - case Bytecodes::_fast_igetfield: __ movl(rax, lo); break; - case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break; - case Bytecodes::_fast_fgetfield: __ fld_s(lo); break; - case Bytecodes::_fast_dgetfield: __ fld_d(lo); break; - case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break; - default: - ShouldNotReachHere(); - } - - // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO) - // volatile_barrier( ); -} - -void TemplateTable::fast_xaccess(TosState state) { - transition(vtos, state); - // get receiver - __ movptr(rax, aaddress(0)); - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rdx, 2); - __ movptr(rbx, Address(rcx, - rdx, - Address::times_ptr, - in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()))); - // make sure exception is reported in correct bcp range (getfield is next instruction) - __ increment(rsi); - __ null_check(rax); - const Address lo = Address(rax, rbx, Address::times_1, 
0*wordSize); - if (state == itos) { - __ movl(rax, lo); - } else if (state == atos) { - __ movptr(rax, lo); - __ verify_oop(rax); - } else if (state == ftos) { - __ fld_s(lo); - } else { - ShouldNotReachHere(); - } - __ decrement(rsi); -} - - - -//---------------------------------------------------------------------------------------------------- -// Calls - -void TemplateTable::count_calls(Register method, Register temp) { - // implemented elsewhere - ShouldNotReachHere(); -} - - -void TemplateTable::prepare_invoke(int byte_no, - Register method, // linked method (or i-klass) - Register index, // itable index, MethodType, etc. - Register recv, // if caller wants to see it - Register flags // if caller wants to test it - ) { - // determine flags - const Bytecodes::Code code = bytecode(); - const bool is_invokeinterface = code == Bytecodes::_invokeinterface; - const bool is_invokedynamic = code == Bytecodes::_invokedynamic; - const bool is_invokehandle = code == Bytecodes::_invokehandle; - const bool is_invokevirtual = code == Bytecodes::_invokevirtual; - const bool is_invokespecial = code == Bytecodes::_invokespecial; - const bool load_receiver = (recv != noreg); - const bool save_flags = (flags != noreg); - assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); - assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); - assert(flags == noreg || flags == rdx, ""); - assert(recv == noreg || recv == rcx, ""); - - // setup registers & access constant pool cache - if (recv == noreg) recv = rcx; - if (flags == noreg) flags = rdx; - assert_different_registers(method, index, recv, flags); - - // save 'interpreter return address' - __ save_bcp(); - - load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); - - // maybe push appendix to arguments (just before return address) - if (is_invokedynamic || is_invokehandle) { - Label L_no_push; - __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); - __ jccb(Assembler::zero, L_no_push); - // Push the appendix as a trailing parameter. - // This must be done before we get the receiver, - // since the parameter_size includes it. - __ push(rbx); - __ mov(rbx, index); - assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); - __ load_resolved_reference_at_index(index, rbx); - __ pop(rbx); - __ push(index); // push appendix (MethodType, CallSite, etc.) 
- __ bind(L_no_push); - } - - // load receiver if needed (note: no return address pushed yet) - if (load_receiver) { - __ movl(recv, flags); - __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); - const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address - const int receiver_is_at_end = -1; // back off one slot to get receiver - Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); - __ movptr(recv, recv_addr); - __ verify_oop(recv); - } - - if (save_flags) { - __ mov(rsi, flags); - } - - // compute return type - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - // Make sure we don't need to mask flags after the above shift - ConstantPoolCacheEntry::verify_tos_state_shift(); - // load return address - { - const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); - ExternalAddress table(table_addr); - __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); - } - - // push return address - __ push(flags); - - // Restore flags value from the constant pool cache, and restore rsi - // for later null checks. rsi is the bytecode pointer - if (save_flags) { - __ mov(flags, rsi); - __ restore_bcp(); - } -} - - -void TemplateTable::invokevirtual_helper(Register index, - Register recv, - Register flags) { - // Uses temporary registers rax, rdx - assert_different_registers(index, recv, rax, rdx); - assert(index == rbx, ""); - assert(recv == rcx, ""); - - // Test for an invoke of a final method - Label notFinal; - __ movl(rax, flags); - __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); - __ jcc(Assembler::zero, notFinal); - - const Register method = index; // method must be rbx - assert(method == rbx, - "Method* must be rbx for interpreter calling convention"); - - // do the call - the index is actually the method to call - // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method* - - // It's final, need a null check here! 
- __ null_check(recv); - - // profile this call - __ profile_final_call(rax); - __ profile_arguments_type(rax, method, rsi, true); - - __ jump_from_interpreted(method, rax); - - __ bind(notFinal); - - // get receiver klass - __ null_check(recv, oopDesc::klass_offset_in_bytes()); - __ load_klass(rax, recv); - - // profile this call - __ profile_virtual_call(rax, rdi, rdx); - - // get target Method* & entry point - __ lookup_virtual_method(rax, index, method); - __ profile_arguments_type(rdx, method, rsi, true); - __ jump_from_interpreted(method, rdx); -} - - -void TemplateTable::invokevirtual(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f2_byte, "use this argument"); - prepare_invoke(byte_no, - rbx, // method or vtable index - noreg, // unused itable index - rcx, rdx); // recv, flags - - // rbx: index - // rcx: receiver - // rdx: flags - - invokevirtual_helper(rbx, rcx, rdx); -} - - -void TemplateTable::invokespecial(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, rbx, noreg, // get f1 Method* - rcx); // get receiver also for null check - __ verify_oop(rcx); - __ null_check(rcx); - // do the call - __ profile_call(rax); - __ profile_arguments_type(rax, rbx, rsi, false); - __ jump_from_interpreted(rbx, rax); -} - - -void TemplateTable::invokestatic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, rbx); // get f1 Method* - // do the call - __ profile_call(rax); - __ profile_arguments_type(rax, rbx, rsi, false); - __ jump_from_interpreted(rbx, rax); -} - - -void TemplateTable::fast_invokevfinal(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f2_byte, "use this argument"); - __ stop("fast_invokevfinal not used on x86"); -} - - -void TemplateTable::invokeinterface(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index - rcx, rdx); // recv, flags - - // rax: interface klass (from f1) - // rbx: itable index (from f2) - // rcx: receiver - // rdx: flags - - // Special case of invokeinterface called for virtual method of - // java.lang.Object. See cpCacheOop.cpp for details. - // This code isn't produced by javac, but could be produced by - // another compliant java compiler. - Label notMethod; - __ movl(rdi, rdx); - __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); - __ jcc(Assembler::zero, notMethod); - - invokevirtual_helper(rbx, rcx, rdx); - __ bind(notMethod); - - // Get receiver klass into rdx - also a null check - __ restore_locals(); // restore rdi - __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rdx, rcx); - - // profile this call - __ profile_virtual_call(rdx, rsi, rdi); - - Label no_such_interface, no_such_method; - - __ lookup_interface_method(// inputs: rec. class, interface, itable index - rdx, rax, rbx, - // outputs: method, scan temp. reg - rbx, rsi, - no_such_interface); - - // rbx: Method* to call - // rcx: receiver - // Check for abstract method error - // Note: This should be done more efficiently via a throw_abstract_method_error - // interpreter entry point and a conditional jump to it in case of a null - // method. 
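// The lookup_interface_method call above plus the null test that follows amount
// to the itable scan sketched below; the Klass/Method layout is a hypothetical
// simplification, not the VM's actual data structures.
#include <cstddef>

struct Method;
struct Klass {
  const Klass* interfaces[8];                    // interfaces implemented by the receiver
  Method**     itable_for[8];                    // per-interface method tables
  std::size_t  interface_count;
};

enum class ItableDispatch { found, no_such_method, no_such_interface };

ItableDispatch lookup_interface(const Klass* recv_klass, const Klass* iface,
                                std::size_t itable_index, Method** out_method) {
  for (std::size_t i = 0; i < recv_klass->interface_count; i++) {
    if (recv_klass->interfaces[i] == iface) {
      *out_method = recv_klass->itable_for[i][itable_index];
      return *out_method ? ItableDispatch::found            // jump_from_interpreted
                         : ItableDispatch::no_such_method;  // AbstractMethodError path
    }
  }
  return ItableDispatch::no_such_interface;                 // IncompatibleClassChangeError path
}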
- __ testptr(rbx, rbx); - __ jcc(Assembler::zero, no_such_method); - - __ profile_arguments_type(rdx, rbx, rsi, true); - - // do the call - // rcx: receiver - // rbx,: Method* - __ jump_from_interpreted(rbx, rdx); - __ should_not_reach_here(); - - // exception handling code follows... - // note: must restore interpreter registers to canonical - // state for exception handling to work correctly! - - __ bind(no_such_method); - // throw exception - __ pop(rbx); // pop return address (pushed by prepare_invoke) - __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) - __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); - // the call_VM checks for exception, so we should never return here. - __ should_not_reach_here(); - - __ bind(no_such_interface); - // throw exception - __ pop(rbx); // pop return address (pushed by prepare_invoke) - __ restore_bcp(); // rsi must be correct for exception handler (was destroyed) - __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) - __ call_VM(noreg, CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_IncompatibleClassChangeError)); - // the call_VM checks for exception, so we should never return here. - __ should_not_reach_here(); -} - -void TemplateTable::invokehandle(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - const Register rbx_method = rbx; - const Register rax_mtype = rax; - const Register rcx_recv = rcx; - const Register rdx_flags = rdx; - - prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv); - __ verify_method_ptr(rbx_method); - __ verify_oop(rcx_recv); - __ null_check(rcx_recv); - - // rax: MethodType object (from cpool->resolved_references[f1], if necessary) - // rbx: MH.invokeExact_MT method (from f2) - - // Note: rax_mtype is already pushed (if necessary) by prepare_invoke - - // FIXME: profile the LambdaForm also - __ profile_final_call(rax); - __ profile_arguments_type(rdx, rbx_method, rsi, true); - - __ jump_from_interpreted(rbx_method, rdx); -} - - -void TemplateTable::invokedynamic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - - const Register rbx_method = rbx; - const Register rax_callsite = rax; - - prepare_invoke(byte_no, rbx_method, rax_callsite); - - // rax: CallSite object (from cpool->resolved_references[f1]) - // rbx: MH.linkToCallSite method (from f2) - - // Note: rax_callsite is already pushed by prepare_invoke - - // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(rsi); - __ profile_arguments_type(rdx, rbx, rsi, false); - - __ verify_oop(rax_callsite); - - __ jump_from_interpreted(rbx_method, rdx); -} - -//---------------------------------------------------------------------------------------------------- -// Allocation - -void TemplateTable::_new() { - transition(vtos, atos); - __ get_unsigned_2_byte_index_at_bcp(rdx, 1); - Label slow_case; - Label slow_case_no_pop; - Label done; - Label initialize_header; - Label initialize_object; // including clearing the fields - Label allocate_shared; - - __ get_cpool_and_tags(rcx, rax); - - // Make sure the class we're about to instantiate has been resolved. 
- // This is done before loading InstanceKlass to be consistent with the order - // how Constant Pool is updated (see ConstantPool::klass_at_put) - const int tags_offset = Array::base_offset_in_bytes(); - __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class); - __ jcc(Assembler::notEqual, slow_case_no_pop); - - // get InstanceKlass - __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool))); - __ push(rcx); // save the contexts of klass for initializing the header - - // make sure klass is initialized & doesn't have finalizer - // make sure klass is fully initialized - __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); - __ jcc(Assembler::notEqual, slow_case); - - // get instance_size in InstanceKlass (scaled to a count of bytes) - __ movl(rdx, Address(rcx, Klass::layout_helper_offset())); - // test to see if it has a finalizer or is malformed in some way - __ testl(rdx, Klass::_lh_instance_slow_path_bit); - __ jcc(Assembler::notZero, slow_case); - - // - // Allocate the instance - // 1) Try to allocate in the TLAB - // 2) if fail and the object is large allocate in the shared Eden - // 3) if the above fails (or is not applicable), go to a slow case - // (creates a new TLAB, etc.) - - const bool allow_shared_alloc = - Universe::heap()->supports_inline_contig_alloc(); - - const Register thread = rcx; - if (UseTLAB || allow_shared_alloc) { - __ get_thread(thread); - } - - if (UseTLAB) { - __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); - __ lea(rbx, Address(rax, rdx, Address::times_1)); - __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset()))); - __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); - __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx); - if (ZeroTLAB) { - // the fields have been already cleared - __ jmp(initialize_header); - } else { - // initialize both the header and fields - __ jmp(initialize_object); - } - } - - // Allocation in the shared Eden, if allowed. - // - // rdx: instance size in bytes - if (allow_shared_alloc) { - __ bind(allocate_shared); - - ExternalAddress heap_top((address)Universe::heap()->top_addr()); - - Label retry; - __ bind(retry); - __ movptr(rax, heap_top); - __ lea(rbx, Address(rax, rdx, Address::times_1)); - __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr())); - __ jcc(Assembler::above, slow_case); - - // Compare rax, with the top addr, and if still equal, store the new - // top addr in rbx, at the address of the top addr pointer. Sets ZF if was - // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. - // - // rax,: object begin - // rbx,: object end - // rdx: instance size in bytes - __ locked_cmpxchgptr(rbx, heap_top); - - // if someone beat us on the allocation, try again, otherwise continue - __ jcc(Assembler::notEqual, retry); - - __ incr_allocated_bytes(thread, rdx, 0); - } - - if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { - // The object is initialized before the header. If the object size is - // zero, go directly to the header initialization. - __ bind(initialize_object); - __ decrement(rdx, sizeof(oopDesc)); - __ jcc(Assembler::zero, initialize_header); - - // Initialize topmost object field, divide rdx by 8, check if odd and - // test if zero. 
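// A minimal sketch of the allocation policy spelled out in the comment above:
// TLAB bump-pointer first, then a CAS-retry on the shared Eden top, then the
// slow path.  The Tlab/Eden names below are hypothetical stand-ins.
#include <atomic>
#include <cstddef>
#include <cstdint>

struct Tlab { std::uint8_t* top; std::uint8_t* end; };       // hypothetical stand-in
std::atomic<std::uint8_t*> g_eden_top;                       // cf. Universe::heap()->top_addr()
std::uint8_t*              g_eden_end;                       // cf. Universe::heap()->end_addr()

void* allocate_instance(Tlab& tlab, std::size_t size_in_bytes) {
  // 1) bump-pointer allocation in the thread-local TLAB, no atomics needed
  if (tlab.top + size_in_bytes <= tlab.end) {
    void* obj = tlab.top;
    tlab.top += size_in_bytes;
    return obj;
  }
  // 2) shared Eden: CAS-retry loop, the C++ analogue of locked_cmpxchgptr + retry
  std::uint8_t* old_top = g_eden_top.load();
  while (old_top + size_in_bytes <= g_eden_end) {
    if (g_eden_top.compare_exchange_weak(old_top, old_top + size_in_bytes))
      return old_top;                                        // we raced and won
    // on failure old_top has been reloaded; re-check the bound and retry
  }
  return nullptr;                                            // 3) slow path (InterpreterRuntime::_new)
}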
- __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) - __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd - - // rdx must have been multiple of 8 -#ifdef ASSERT - // make sure rdx was multiple of 8 - Label L; - // Ignore partial flag stall after shrl() since it is debug VM - __ jccb(Assembler::carryClear, L); - __ stop("object size is not multiple of 2 - adjust this code"); - __ bind(L); - // rdx must be > 0, no extra check needed here -#endif - - // initialize remaining object fields: rdx was a multiple of 8 - { Label loop; - __ bind(loop); - __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); - NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx)); - __ decrement(rdx); - __ jcc(Assembler::notZero, loop); - } - - // initialize object header only. - __ bind(initialize_header); - if (UseBiasedLocking) { - __ pop(rcx); // get saved klass back in the register. - __ movptr(rbx, Address(rcx, Klass::prototype_header_offset())); - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx); - } else { - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), - (int32_t)markOopDesc::prototype()); // header - __ pop(rcx); // get saved klass back in the register. - } - __ store_klass(rax, rcx); // klass - - { - SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); - // Trigger dtrace event for fastpath - __ push(atos); - __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); - __ pop(atos); - } - - __ jmp(done); - } - - // slow case - __ bind(slow_case); - __ pop(rcx); // restore stack pointer to what it was when we came in. - __ bind(slow_case_no_pop); - __ get_constant_pool(rax); - __ get_unsigned_2_byte_index_at_bcp(rdx, 1); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx); - - // continue - __ bind(done); -} - - -void TemplateTable::newarray() { - transition(itos, atos); - __ push_i(rax); // make sure everything is on the stack - __ load_unsigned_byte(rdx, at_bcp(1)); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax); - __ pop_i(rdx); // discard size -} - - -void TemplateTable::anewarray() { - transition(itos, atos); - __ get_unsigned_2_byte_index_at_bcp(rdx, 1); - __ get_constant_pool(rcx); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax); -} - - -void TemplateTable::arraylength() { - transition(atos, itos); - __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); - __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); -} - - -void TemplateTable::checkcast() { - transition(atos, atos); - Label done, is_null, ok_is_subtype, quicked, resolved; - __ testptr(rax, rax); // Object is in EAX - __ jcc(Assembler::zero, is_null); - - // Get cpool & tags index - __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index - // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, Address::times_1, Array::base_offset_in_bytes()), JVM_CONSTANT_Class); - __ jcc(Assembler::equal, quicked); - - __ push(atos); - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) ); - // vm_result_2 has metadata result - // borrow rdi from locals - __ get_thread(rdi); - __ get_vm_result_2(rax, rdi); - __ restore_locals(); - __ pop_ptr(rdx); - __ jmpb(resolved); - - // Get superklass in EAX and subklass in EBX - __ bind(quicked); - __ mov(rdx, rax); // Save object in 
EDX; EAX needed for subtype check - __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool))); - - __ bind(resolved); - __ load_klass(rbx, rdx); - - // Generate subtype check. Blows ECX. Resets EDI. Object in EDX. - // Superklass in EAX. Subklass in EBX. - __ gen_subtype_check( rbx, ok_is_subtype ); - - // Come here on failure - __ push(rdx); - // object is at TOS - __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); - - // Come here on success - __ bind(ok_is_subtype); - __ mov(rax,rdx); // Restore object in EDX - - // Collect counts on whether this check-cast sees NULLs a lot or not. - if (ProfileInterpreter) { - __ jmp(done); - __ bind(is_null); - __ profile_null_seen(rcx); - } else { - __ bind(is_null); // same as 'done' - } - __ bind(done); -} - - -void TemplateTable::instanceof() { - transition(atos, itos); - Label done, is_null, ok_is_subtype, quicked, resolved; - __ testptr(rax, rax); - __ jcc(Assembler::zero, is_null); - - // Get cpool & tags index - __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index - // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, Address::times_1, Array::base_offset_in_bytes()), JVM_CONSTANT_Class); - __ jcc(Assembler::equal, quicked); - - __ push(atos); - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) ); - // vm_result_2 has metadata result - // borrow rdi from locals - __ get_thread(rdi); - __ get_vm_result_2(rax, rdi); - __ restore_locals(); - __ pop_ptr(rdx); - __ load_klass(rdx, rdx); - __ jmp(resolved); - - // Get superklass in EAX and subklass in EDX - __ bind(quicked); - __ load_klass(rdx, rax); - __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool))); - - __ bind(resolved); - - // Generate subtype check. Blows ECX. Resets EDI. - // Superklass in EAX. Subklass in EDX. - __ gen_subtype_check( rdx, ok_is_subtype ); - - // Come here on failure - __ xorl(rax,rax); - __ jmpb(done); - // Come here on success - __ bind(ok_is_subtype); - __ movl(rax, 1); - - // Collect counts on whether this test sees NULLs a lot or not. - if (ProfileInterpreter) { - __ jmp(done); - __ bind(is_null); - __ profile_null_seen(rcx); - } else { - __ bind(is_null); // same as 'done' - } - __ bind(done); - // rax, = 0: obj == NULL or obj is not an instanceof the specified klass - // rax, = 1: obj != NULL and obj is an instanceof the specified klass -} - - -//---------------------------------------------------------------------------------------------------- -// Breakpoints -void TemplateTable::_breakpoint() { - - // Note: We get here even if we are single stepping.. - // jbug inists on setting breakpoints at every bytecode - // even if we are in single step mode. 
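// For reference, a minimal sketch of the contract implemented by the checkcast
// and instanceof templates above; the types and the single-inheritance walk are
// hypothetical stand-ins for what gen_subtype_check actually does.
#include <stdexcept>

struct VmKlass { const VmKlass* super; };
struct VmOop   { const VmKlass* klass; };

static bool is_subtype_of(const VmKlass* sub, const VmKlass* super_k) {
  for (const VmKlass* k = sub; k != nullptr; k = k->super)   // toy superclass walk only
    if (k == super_k) return true;
  return false;
}

// checkcast: null passes through unchanged, a failing non-null cast throws
static const VmOop* checkcast_op(const VmOop* obj, const VmKlass* k) {
  if (obj != nullptr && !is_subtype_of(obj->klass, k))
    throw std::runtime_error("ClassCastException");
  return obj;
}

// instanceof: 1 only for a non-null object whose class conforms to k
static int instanceof_op(const VmOop* obj, const VmKlass* k) {
  return obj != nullptr && is_subtype_of(obj->klass, k);
}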
- - transition(vtos, vtos); - - // get the unpatched byte code - __ get_method(rcx); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi); - __ mov(rbx, rax); - - // post the breakpoint event - __ get_method(rcx); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi); - - // complete the execution of original bytecode - __ dispatch_only_normal(vtos); -} - - -//---------------------------------------------------------------------------------------------------- -// Exceptions - -void TemplateTable::athrow() { - transition(atos, vtos); - __ null_check(rax); - __ jump(ExternalAddress(Interpreter::throw_exception_entry())); -} - - -//---------------------------------------------------------------------------------------------------- -// Synchronization -// -// Note: monitorenter & exit are symmetric routines; which is reflected -// in the assembly code structure as well -// -// Stack layout: -// -// [expressions ] <--- rsp = expression stack top -// .. -// [expressions ] -// [monitor entry] <--- monitor block top = expression stack bot -// .. -// [monitor entry] -// [frame data ] <--- monitor block bot -// ... -// [saved rbp, ] <--- rbp, - - -void TemplateTable::monitorenter() { - transition(atos, vtos); - - // check for NULL object - __ null_check(rax); - - const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize); - const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize); - Label allocated; - - // initialize entry pointer - __ xorl(rdx, rdx); // points to free slot or NULL - - // find a free slot in the monitor block (result in rdx) - { Label entry, loop, exit; - __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry - - __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block - __ jmpb(entry); - - __ bind(loop); - __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used - __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx - __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object - __ jccb(Assembler::equal, exit); // if same object then stop searching - __ addptr(rcx, entry_size); // otherwise advance to next entry - __ bind(entry); - __ cmpptr(rcx, rbx); // check if bottom reached - __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry - __ bind(exit); - } - - __ testptr(rdx, rdx); // check if a slot has been found - __ jccb(Assembler::notZero, allocated); // if found, continue with that one - - // allocate one if there's no free slot - { Label entry, loop; - // 1. compute new pointers // rsp: old expression stack top - __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom - __ subptr(rsp, entry_size); // move expression stack top - __ subptr(rdx, entry_size); // move expression stack bottom - __ mov(rcx, rsp); // set start value for copy loop - __ movptr(monitor_block_bot, rdx); // set new monitor block top - __ jmp(entry); - // 2. 
move expression stack contents - __ bind(loop); - __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location - __ movptr(Address(rcx, 0), rbx); // and store it at new location - __ addptr(rcx, wordSize); // advance to next word - __ bind(entry); - __ cmpptr(rcx, rdx); // check if bottom reached - __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word - } - - // call run-time routine - // rdx: points to monitor entry - __ bind(allocated); - - // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly. - // The object has already been poped from the stack, so the expression stack looks correct. - __ increment(rsi); - - __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object - __ lock_object(rdx); - - // check to make sure this monitor doesn't cause stack overflow after locking - __ save_bcp(); // in case of exception - __ generate_stack_overflow_check(0); - - // The bcp has already been incremented. Just need to dispatch to next instruction. - __ dispatch_next(vtos); -} - - -void TemplateTable::monitorexit() { - transition(atos, vtos); - - // check for NULL object - __ null_check(rax); - - const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize); - const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize); - Label found; - - // find matching slot - { Label entry, loop; - __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry - __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block - __ jmpb(entry); - - __ bind(loop); - __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object - __ jcc(Assembler::equal, found); // if same object then stop searching - __ addptr(rdx, entry_size); // otherwise advance to next entry - __ bind(entry); - __ cmpptr(rdx, rbx); // check if bottom reached - __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry - } - - // error handling. 
Unlocking was not block-structured - Label end; - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); - __ should_not_reach_here(); - - // call run-time routine - // rcx: points to monitor entry - __ bind(found); - __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) - __ unlock_object(rdx); - __ pop_ptr(rax); // discard object - __ bind(end); -} - - -//---------------------------------------------------------------------------------------------------- -// Wide instructions - -void TemplateTable::wide() { - transition(vtos, vtos); - __ load_unsigned_byte(rbx, at_bcp(1)); - ExternalAddress wtable((address)Interpreter::_wentry_point); - __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr))); - // Note: the rsi increment step is part of the individual wide bytecode implementations -} - - -//---------------------------------------------------------------------------------------------------- -// Multi arrays - -void TemplateTable::multianewarray() { - transition(vtos, atos); - __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions - // last dim is on top of stack; we want address of first one: - // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize - // the latter wordSize to point to the beginning of the array. - __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize)); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax, - __ load_unsigned_byte(rbx, at_bcp(3)); - __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts -} - -#endif /* !CC_INTERP */ diff -r 17efac395638 -r a46e5922bb40 hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp --- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,3741 +0,0 @@ -/* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "asm/macroAssembler.hpp" -#include "interpreter/interpreter.hpp" -#include "interpreter/interpreterRuntime.hpp" -#include "interpreter/interp_masm.hpp" -#include "interpreter/templateTable.hpp" -#include "memory/universe.inline.hpp" -#include "oops/methodData.hpp" -#include "oops/objArrayKlass.hpp" -#include "oops/oop.inline.hpp" -#include "prims/methodHandles.hpp" -#include "runtime/sharedRuntime.hpp" -#include "runtime/stubRoutines.hpp" -#include "runtime/synchronizer.hpp" -#include "utilities/macros.hpp" - -#ifndef CC_INTERP - -#define __ _masm-> - -// Platform-dependent initialization - -void TemplateTable::pd_initialize() { - // No amd64 specific initialization -} - -// Address computation: local variables - -static inline Address iaddress(int n) { - return Address(r14, Interpreter::local_offset_in_bytes(n)); -} - -static inline Address laddress(int n) { - return iaddress(n + 1); -} - -static inline Address faddress(int n) { - return iaddress(n); -} - -static inline Address daddress(int n) { - return laddress(n); -} - -static inline Address aaddress(int n) { - return iaddress(n); -} - -static inline Address iaddress(Register r) { - return Address(r14, r, Address::times_8); -} - -static inline Address laddress(Register r) { - return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1)); -} - -static inline Address faddress(Register r) { - return iaddress(r); -} - -static inline Address daddress(Register r) { - return laddress(r); -} - -static inline Address aaddress(Register r) { - return iaddress(r); -} - -static inline Address at_rsp() { - return Address(rsp, 0); -} - -// At top of Java expression stack which may be different than esp(). It -// isn't for category 1 objects. -static inline Address at_tos () { - return Address(rsp, Interpreter::expr_offset_in_bytes(0)); -} - -static inline Address at_tos_p1() { - return Address(rsp, Interpreter::expr_offset_in_bytes(1)); -} - -static inline Address at_tos_p2() { - return Address(rsp, Interpreter::expr_offset_in_bytes(2)); -} - -// Condition conversion -static Assembler::Condition j_not(TemplateTable::Condition cc) { - switch (cc) { - case TemplateTable::equal : return Assembler::notEqual; - case TemplateTable::not_equal : return Assembler::equal; - case TemplateTable::less : return Assembler::greaterEqual; - case TemplateTable::less_equal : return Assembler::greater; - case TemplateTable::greater : return Assembler::lessEqual; - case TemplateTable::greater_equal: return Assembler::less; - } - ShouldNotReachHere(); - return Assembler::zero; -} - - -// Miscelaneous helper routines -// Store an oop (or NULL) at the address described by obj. 
-// If val == noreg this means store a NULL - -static void do_oop_store(InterpreterMacroAssembler* _masm, - Address obj, - Register val, - BarrierSet::Name barrier, - bool precise) { - assert(val == noreg || val == rax, "parameter is just for looks"); - switch (barrier) { -#if INCLUDE_ALL_GCS - case BarrierSet::G1SATBCT: - case BarrierSet::G1SATBCTLogging: - { - // flatten object address if needed - if (obj.index() == noreg && obj.disp() == 0) { - if (obj.base() != rdx) { - __ movq(rdx, obj.base()); - } - } else { - __ leaq(rdx, obj); - } - __ g1_write_barrier_pre(rdx /* obj */, - rbx /* pre_val */, - r15_thread /* thread */, - r8 /* tmp */, - val != noreg /* tosca_live */, - false /* expand_call */); - if (val == noreg) { - __ store_heap_oop_null(Address(rdx, 0)); - } else { - // G1 barrier needs uncompressed oop for region cross check. - Register new_val = val; - if (UseCompressedOops) { - new_val = rbx; - __ movptr(new_val, val); - } - __ store_heap_oop(Address(rdx, 0), val); - __ g1_write_barrier_post(rdx /* store_adr */, - new_val /* new_val */, - r15_thread /* thread */, - r8 /* tmp */, - rbx /* tmp2 */); - } - } - break; -#endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: - case BarrierSet::CardTableExtension: - { - if (val == noreg) { - __ store_heap_oop_null(obj); - } else { - __ store_heap_oop(obj, val); - // flatten object address if needed - if (!precise || (obj.index() == noreg && obj.disp() == 0)) { - __ store_check(obj.base()); - } else { - __ leaq(rdx, obj); - __ store_check(rdx); - } - } - } - break; - case BarrierSet::ModRef: - if (val == noreg) { - __ store_heap_oop_null(obj); - } else { - __ store_heap_oop(obj, val); - } - break; - default : - ShouldNotReachHere(); - - } -} - -Address TemplateTable::at_bcp(int offset) { - assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); - return Address(r13, offset); -} - -void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, - Register temp_reg, bool load_bc_into_bc_reg/*=true*/, - int byte_no) { - if (!RewriteBytecodes) return; - Label L_patch_done; - - switch (bc) { - case Bytecodes::_fast_aputfield: - case Bytecodes::_fast_bputfield: - case Bytecodes::_fast_cputfield: - case Bytecodes::_fast_dputfield: - case Bytecodes::_fast_fputfield: - case Bytecodes::_fast_iputfield: - case Bytecodes::_fast_lputfield: - case Bytecodes::_fast_sputfield: - { - // We skip bytecode quickening for putfield instructions when - // the put_code written to the constant pool cache is zero. - // This is required so that every execution of this instruction - // calls out to InterpreterRuntime::resolve_get_put to do - // additional, required work. - assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); - assert(load_bc_into_bc_reg, "we use bc_reg as temp"); - __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1); - __ movl(bc_reg, bc); - __ cmpl(temp_reg, (int) 0); - __ jcc(Assembler::zero, L_patch_done); // don't patch - } - break; - default: - assert(byte_no == -1, "sanity"); - // the pair bytecodes have already done the load. 
- if (load_bc_into_bc_reg) { - __ movl(bc_reg, bc); - } - } - - if (JvmtiExport::can_post_breakpoint()) { - Label L_fast_patch; - // if a breakpoint is present we can't rewrite the stream directly - __ movzbl(temp_reg, at_bcp(0)); - __ cmpl(temp_reg, Bytecodes::_breakpoint); - __ jcc(Assembler::notEqual, L_fast_patch); - __ get_method(temp_reg); - // Let breakpoint table handling rewrite to quicker bytecode - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg); -#ifndef ASSERT - __ jmpb(L_patch_done); -#else - __ jmp(L_patch_done); -#endif - __ bind(L_fast_patch); - } - -#ifdef ASSERT - Label L_okay; - __ load_unsigned_byte(temp_reg, at_bcp(0)); - __ cmpl(temp_reg, (int) Bytecodes::java_code(bc)); - __ jcc(Assembler::equal, L_okay); - __ cmpl(temp_reg, bc_reg); - __ jcc(Assembler::equal, L_okay); - __ stop("patching the wrong bytecode"); - __ bind(L_okay); -#endif - - // patch bytecode - __ movb(at_bcp(0), bc_reg); - __ bind(L_patch_done); -} - - -// Individual instructions - -void TemplateTable::nop() { - transition(vtos, vtos); - // nothing to do -} - -void TemplateTable::shouldnotreachhere() { - transition(vtos, vtos); - __ stop("shouldnotreachhere bytecode"); -} - -void TemplateTable::aconst_null() { - transition(vtos, atos); - __ xorl(rax, rax); -} - -void TemplateTable::iconst(int value) { - transition(vtos, itos); - if (value == 0) { - __ xorl(rax, rax); - } else { - __ movl(rax, value); - } -} - -void TemplateTable::lconst(int value) { - transition(vtos, ltos); - if (value == 0) { - __ xorl(rax, rax); - } else { - __ movl(rax, value); - } -} - -void TemplateTable::fconst(int value) { - transition(vtos, ftos); - static float one = 1.0f, two = 2.0f; - switch (value) { - case 0: - __ xorps(xmm0, xmm0); - break; - case 1: - __ movflt(xmm0, ExternalAddress((address) &one)); - break; - case 2: - __ movflt(xmm0, ExternalAddress((address) &two)); - break; - default: - ShouldNotReachHere(); - break; - } -} - -void TemplateTable::dconst(int value) { - transition(vtos, dtos); - static double one = 1.0; - switch (value) { - case 0: - __ xorpd(xmm0, xmm0); - break; - case 1: - __ movdbl(xmm0, ExternalAddress((address) &one)); - break; - default: - ShouldNotReachHere(); - break; - } -} - -void TemplateTable::bipush() { - transition(vtos, itos); - __ load_signed_byte(rax, at_bcp(1)); -} - -void TemplateTable::sipush() { - transition(vtos, itos); - __ load_unsigned_short(rax, at_bcp(1)); - __ bswapl(rax); - __ sarl(rax, 16); -} - -void TemplateTable::ldc(bool wide) { - transition(vtos, vtos); - Label call_ldc, notFloat, notClass, Done; - - if (wide) { - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); - } else { - __ load_unsigned_byte(rbx, at_bcp(1)); - } - - __ get_cpool_and_tags(rcx, rax); - const int base_offset = ConstantPool::header_size() * wordSize; - const int tags_offset = Array::base_offset_in_bytes(); - - // get type - __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset)); - - // unresolved class - get the resolved class - __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); - __ jccb(Assembler::equal, call_ldc); - - // unresolved class in error state - call into runtime to throw the error - // from the first resolution attempt - __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); - __ jccb(Assembler::equal, call_ldc); - - // resolved class - need to call vm to get java mirror of the class - __ cmpl(rdx, JVM_CONSTANT_Class); - __ jcc(Assembler::notEqual, notClass); - - __ bind(call_ldc); - __ movl(c_rarg1, wide); - call_VM(rax, 
CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1); - __ push_ptr(rax); - __ verify_oop(rax); - __ jmp(Done); - - __ bind(notClass); - __ cmpl(rdx, JVM_CONSTANT_Float); - __ jccb(Assembler::notEqual, notFloat); - // ftos - __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); - __ push_f(); - __ jmp(Done); - - __ bind(notFloat); -#ifdef ASSERT - { - Label L; - __ cmpl(rdx, JVM_CONSTANT_Integer); - __ jcc(Assembler::equal, L); - // String and Object are rewritten to fast_aldc - __ stop("unexpected tag type in ldc"); - __ bind(L); - } -#endif - // itos JVM_CONSTANT_Integer only - __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset)); - __ push_i(rax); - __ bind(Done); -} - -// Fast path for caching oop constants. -void TemplateTable::fast_aldc(bool wide) { - transition(vtos, atos); - - Register result = rax; - Register tmp = rdx; - int index_size = wide ? sizeof(u2) : sizeof(u1); - - Label resolved; - - // We are resolved if the resolved reference cache entry contains a - // non-null object (String, MethodType, etc.) - assert_different_registers(result, tmp); - __ get_cache_index_at_bcp(tmp, 1, index_size); - __ load_resolved_reference_at_index(result, tmp); - __ testl(result, result); - __ jcc(Assembler::notZero, resolved); - - address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); - - // first time invocation - must resolve first - __ movl(tmp, (int)bytecode()); - __ call_VM(result, entry, tmp); - - __ bind(resolved); - - if (VerifyOops) { - __ verify_oop(result); - } -} - -void TemplateTable::ldc2_w() { - transition(vtos, vtos); - Label Long, Done; - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); - - __ get_cpool_and_tags(rcx, rax); - const int base_offset = ConstantPool::header_size() * wordSize; - const int tags_offset = Array::base_offset_in_bytes(); - - // get type - __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), - JVM_CONSTANT_Double); - __ jccb(Assembler::notEqual, Long); - // dtos - __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); - __ push_d(); - __ jmpb(Done); - - __ bind(Long); - // ltos - __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset)); - __ push_l(); - - __ bind(Done); -} - -void TemplateTable::locals_index(Register reg, int offset) { - __ load_unsigned_byte(reg, at_bcp(offset)); - __ negptr(reg); -} - -void TemplateTable::iload() { - transition(vtos, itos); - if (RewriteFrequentPairs) { - Label rewrite, done; - const Register bc = c_rarg3; - assert(rbx != bc, "register damaged"); - - // get next byte - __ load_unsigned_byte(rbx, - at_bcp(Bytecodes::length_for(Bytecodes::_iload))); - // if _iload, wait to rewrite to iload2. We only want to rewrite the - // last two iloads in a pair. Comparing against fast_iload means that - // the next bytecode is neither an iload or a caload, and therefore - // an iload pair. - __ cmpl(rbx, Bytecodes::_iload); - __ jcc(Assembler::equal, done); - - __ cmpl(rbx, Bytecodes::_fast_iload); - __ movl(bc, Bytecodes::_fast_iload2); - __ jccb(Assembler::equal, rewrite); - - // if _caload, rewrite to fast_icaload - __ cmpl(rbx, Bytecodes::_caload); - __ movl(bc, Bytecodes::_fast_icaload); - __ jccb(Assembler::equal, rewrite); - - // rewrite so iload doesn't check again. 
- __ movl(bc, Bytecodes::_fast_iload); - - // rewrite - // bc: fast bytecode - __ bind(rewrite); - patch_bytecode(Bytecodes::_iload, bc, rbx, false); - __ bind(done); - } - - // Get the local value into tos - locals_index(rbx); - __ movl(rax, iaddress(rbx)); -} - -void TemplateTable::fast_iload2() { - transition(vtos, itos); - locals_index(rbx); - __ movl(rax, iaddress(rbx)); - __ push(itos); - locals_index(rbx, 3); - __ movl(rax, iaddress(rbx)); -} - -void TemplateTable::fast_iload() { - transition(vtos, itos); - locals_index(rbx); - __ movl(rax, iaddress(rbx)); -} - -void TemplateTable::lload() { - transition(vtos, ltos); - locals_index(rbx); - __ movq(rax, laddress(rbx)); -} - -void TemplateTable::fload() { - transition(vtos, ftos); - locals_index(rbx); - __ movflt(xmm0, faddress(rbx)); -} - -void TemplateTable::dload() { - transition(vtos, dtos); - locals_index(rbx); - __ movdbl(xmm0, daddress(rbx)); -} - -void TemplateTable::aload() { - transition(vtos, atos); - locals_index(rbx); - __ movptr(rax, aaddress(rbx)); -} - -void TemplateTable::locals_index_wide(Register reg) { - __ load_unsigned_short(reg, at_bcp(2)); - __ bswapl(reg); - __ shrl(reg, 16); - __ negptr(reg); -} - -void TemplateTable::wide_iload() { - transition(vtos, itos); - locals_index_wide(rbx); - __ movl(rax, iaddress(rbx)); -} - -void TemplateTable::wide_lload() { - transition(vtos, ltos); - locals_index_wide(rbx); - __ movq(rax, laddress(rbx)); -} - -void TemplateTable::wide_fload() { - transition(vtos, ftos); - locals_index_wide(rbx); - __ movflt(xmm0, faddress(rbx)); -} - -void TemplateTable::wide_dload() { - transition(vtos, dtos); - locals_index_wide(rbx); - __ movdbl(xmm0, daddress(rbx)); -} - -void TemplateTable::wide_aload() { - transition(vtos, atos); - locals_index_wide(rbx); - __ movptr(rax, aaddress(rbx)); -} - -void TemplateTable::index_check(Register array, Register index) { - // destroys rbx - // check array - __ null_check(array, arrayOopDesc::length_offset_in_bytes()); - // sign extend index for use by indexed load - __ movl2ptr(index, index); - // check index - __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); - if (index != rbx) { - // ??? 
convention: move aberrant index into ebx for exception message - assert(rbx != array, "different registers"); - __ movl(rbx, index); - } - __ jump_cc(Assembler::aboveEqual, - ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); -} - -void TemplateTable::iaload() { - transition(itos, itos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ movl(rax, Address(rdx, rax, - Address::times_4, - arrayOopDesc::base_offset_in_bytes(T_INT))); -} - -void TemplateTable::laload() { - transition(itos, ltos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ movq(rax, Address(rdx, rbx, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_LONG))); -} - -void TemplateTable::faload() { - transition(itos, ftos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ movflt(xmm0, Address(rdx, rax, - Address::times_4, - arrayOopDesc::base_offset_in_bytes(T_FLOAT))); -} - -void TemplateTable::daload() { - transition(itos, dtos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ movdbl(xmm0, Address(rdx, rax, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); -} - -void TemplateTable::aaload() { - transition(itos, atos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ load_heap_oop(rax, Address(rdx, rax, - UseCompressedOops ? Address::times_4 : Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_OBJECT))); -} - -void TemplateTable::baload() { - transition(itos, itos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ load_signed_byte(rax, - Address(rdx, rax, - Address::times_1, - arrayOopDesc::base_offset_in_bytes(T_BYTE))); -} - -void TemplateTable::caload() { - transition(itos, itos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ load_unsigned_short(rax, - Address(rdx, rax, - Address::times_2, - arrayOopDesc::base_offset_in_bytes(T_CHAR))); -} - -// iload followed by caload frequent pair -void TemplateTable::fast_icaload() { - transition(vtos, itos); - // load index out of locals - locals_index(rbx); - __ movl(rax, iaddress(rbx)); - - // eax: index - // rdx: array - __ pop_ptr(rdx); - index_check(rdx, rax); // kills rbx - __ load_unsigned_short(rax, - Address(rdx, rax, - Address::times_2, - arrayOopDesc::base_offset_in_bytes(T_CHAR))); -} - -void TemplateTable::saload() { - transition(itos, itos); - __ pop_ptr(rdx); - // eax: index - // rdx: array - index_check(rdx, rax); // kills rbx - __ load_signed_short(rax, - Address(rdx, rax, - Address::times_2, - arrayOopDesc::base_offset_in_bytes(T_SHORT))); -} - -void TemplateTable::iload(int n) { - transition(vtos, itos); - __ movl(rax, iaddress(n)); -} - -void TemplateTable::lload(int n) { - transition(vtos, ltos); - __ movq(rax, laddress(n)); -} - -void TemplateTable::fload(int n) { - transition(vtos, ftos); - __ movflt(xmm0, faddress(n)); -} - -void TemplateTable::dload(int n) { - transition(vtos, dtos); - __ movdbl(xmm0, daddress(n)); -} - -void TemplateTable::aload(int n) { - transition(vtos, atos); - __ movptr(rax, aaddress(n)); -} - -void TemplateTable::aload_0() { - transition(vtos, atos); - // According to bytecode histograms, the pairs: - // - // _aload_0, _fast_igetfield - // _aload_0, _fast_agetfield - // _aload_0, _fast_fgetfield - // - // occur frequently. 
If RewriteFrequentPairs is set, the (slow) - // _aload_0 bytecode checks if the next bytecode is either - // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then - // rewrites the current bytecode into a pair bytecode; otherwise it - // rewrites the current bytecode into _fast_aload_0 that doesn't do - // the pair check anymore. - // - // Note: If the next bytecode is _getfield, the rewrite must be - // delayed, otherwise we may miss an opportunity for a pair. - // - // Also rewrite frequent pairs - // aload_0, aload_1 - // aload_0, iload_1 - // These bytecodes with a small amount of code are most profitable - // to rewrite - if (RewriteFrequentPairs) { - Label rewrite, done; - const Register bc = c_rarg3; - assert(rbx != bc, "register damaged"); - // get next byte - __ load_unsigned_byte(rbx, - at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); - - // do actual aload_0 - aload(0); - - // if _getfield then wait with rewrite - __ cmpl(rbx, Bytecodes::_getfield); - __ jcc(Assembler::equal, done); - - // if _igetfield then reqrite to _fast_iaccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == - Bytecodes::_aload_0, - "fix bytecode definition"); - __ cmpl(rbx, Bytecodes::_fast_igetfield); - __ movl(bc, Bytecodes::_fast_iaccess_0); - __ jccb(Assembler::equal, rewrite); - - // if _agetfield then reqrite to _fast_aaccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == - Bytecodes::_aload_0, - "fix bytecode definition"); - __ cmpl(rbx, Bytecodes::_fast_agetfield); - __ movl(bc, Bytecodes::_fast_aaccess_0); - __ jccb(Assembler::equal, rewrite); - - // if _fgetfield then reqrite to _fast_faccess_0 - assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == - Bytecodes::_aload_0, - "fix bytecode definition"); - __ cmpl(rbx, Bytecodes::_fast_fgetfield); - __ movl(bc, Bytecodes::_fast_faccess_0); - __ jccb(Assembler::equal, rewrite); - - // else rewrite to _fast_aload0 - assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == - Bytecodes::_aload_0, - "fix bytecode definition"); - __ movl(bc, Bytecodes::_fast_aload_0); - - // rewrite - // bc: fast bytecode - __ bind(rewrite); - patch_bytecode(Bytecodes::_aload_0, bc, rbx, false); - - __ bind(done); - } else { - aload(0); - } -} - -void TemplateTable::istore() { - transition(itos, vtos); - locals_index(rbx); - __ movl(iaddress(rbx), rax); -} - -void TemplateTable::lstore() { - transition(ltos, vtos); - locals_index(rbx); - __ movq(laddress(rbx), rax); -} - -void TemplateTable::fstore() { - transition(ftos, vtos); - locals_index(rbx); - __ movflt(faddress(rbx), xmm0); -} - -void TemplateTable::dstore() { - transition(dtos, vtos); - locals_index(rbx); - __ movdbl(daddress(rbx), xmm0); -} - -void TemplateTable::astore() { - transition(vtos, vtos); - __ pop_ptr(rax); - locals_index(rbx); - __ movptr(aaddress(rbx), rax); -} - -void TemplateTable::wide_istore() { - transition(vtos, vtos); - __ pop_i(); - locals_index_wide(rbx); - __ movl(iaddress(rbx), rax); -} - -void TemplateTable::wide_lstore() { - transition(vtos, vtos); - __ pop_l(); - locals_index_wide(rbx); - __ movq(laddress(rbx), rax); -} - -void TemplateTable::wide_fstore() { - transition(vtos, vtos); - __ pop_f(); - locals_index_wide(rbx); - __ movflt(faddress(rbx), xmm0); -} - -void TemplateTable::wide_dstore() { - transition(vtos, vtos); - __ pop_d(); - locals_index_wide(rbx); - __ movdbl(daddress(rbx), xmm0); -} - -void TemplateTable::wide_astore() { - transition(vtos, vtos); - __ pop_ptr(rax); - locals_index_wide(rbx); - __ movptr(aaddress(rbx), rax); 
-} - -void TemplateTable::iastore() { - transition(itos, vtos); - __ pop_i(rbx); - __ pop_ptr(rdx); - // eax: value - // ebx: index - // rdx: array - index_check(rdx, rbx); // prefer index in ebx - __ movl(Address(rdx, rbx, - Address::times_4, - arrayOopDesc::base_offset_in_bytes(T_INT)), - rax); -} - -void TemplateTable::lastore() { - transition(ltos, vtos); - __ pop_i(rbx); - __ pop_ptr(rdx); - // rax: value - // ebx: index - // rdx: array - index_check(rdx, rbx); // prefer index in ebx - __ movq(Address(rdx, rbx, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_LONG)), - rax); -} - -void TemplateTable::fastore() { - transition(ftos, vtos); - __ pop_i(rbx); - __ pop_ptr(rdx); - // xmm0: value - // ebx: index - // rdx: array - index_check(rdx, rbx); // prefer index in ebx - __ movflt(Address(rdx, rbx, - Address::times_4, - arrayOopDesc::base_offset_in_bytes(T_FLOAT)), - xmm0); -} - -void TemplateTable::dastore() { - transition(dtos, vtos); - __ pop_i(rbx); - __ pop_ptr(rdx); - // xmm0: value - // ebx: index - // rdx: array - index_check(rdx, rbx); // prefer index in ebx - __ movdbl(Address(rdx, rbx, - Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_DOUBLE)), - xmm0); -} - -void TemplateTable::aastore() { - Label is_null, ok_is_subtype, done; - transition(vtos, vtos); - // stack: ..., array, index, value - __ movptr(rax, at_tos()); // value - __ movl(rcx, at_tos_p1()); // index - __ movptr(rdx, at_tos_p2()); // array - - Address element_address(rdx, rcx, - UseCompressedOops? Address::times_4 : Address::times_8, - arrayOopDesc::base_offset_in_bytes(T_OBJECT)); - - index_check(rdx, rcx); // kills rbx - // do array store check - check for NULL value first - __ testptr(rax, rax); - __ jcc(Assembler::zero, is_null); - - // Move subklass into rbx - __ load_klass(rbx, rax); - // Move superklass into rax - __ load_klass(rax, rdx); - __ movptr(rax, Address(rax, - ObjArrayKlass::element_klass_offset())); - // Compress array + index*oopSize + 12 into a single register. Frees rcx. - __ lea(rdx, element_address); - - // Generate subtype check. Blows rcx, rdi - // Superklass in rax. Subklass in rbx. - __ gen_subtype_check(rbx, ok_is_subtype); - - // Come here on failure - // object is at TOS - __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); - - // Come here on success - __ bind(ok_is_subtype); - - // Get the value we will store - __ movptr(rax, at_tos()); - // Now store using the appropriate barrier - do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true); - __ jmp(done); - - // Have a NULL in rax, rdx=array, ecx=index. 
Store NULL at ary[idx] - __ bind(is_null); - __ profile_null_seen(rbx); - - // Store a NULL - do_oop_store(_masm, element_address, noreg, _bs->kind(), true); - - // Pop stack arguments - __ bind(done); - __ addptr(rsp, 3 * Interpreter::stackElementSize); -} - -void TemplateTable::bastore() { - transition(itos, vtos); - __ pop_i(rbx); - __ pop_ptr(rdx); - // eax: value - // ebx: index - // rdx: array - index_check(rdx, rbx); // prefer index in ebx - __ movb(Address(rdx, rbx, - Address::times_1, - arrayOopDesc::base_offset_in_bytes(T_BYTE)), - rax); -} - -void TemplateTable::castore() { - transition(itos, vtos); - __ pop_i(rbx); - __ pop_ptr(rdx); - // eax: value - // ebx: index - // rdx: array - index_check(rdx, rbx); // prefer index in ebx - __ movw(Address(rdx, rbx, - Address::times_2, - arrayOopDesc::base_offset_in_bytes(T_CHAR)), - rax); -} - -void TemplateTable::sastore() { - castore(); -} - -void TemplateTable::istore(int n) { - transition(itos, vtos); - __ movl(iaddress(n), rax); -} - -void TemplateTable::lstore(int n) { - transition(ltos, vtos); - __ movq(laddress(n), rax); -} - -void TemplateTable::fstore(int n) { - transition(ftos, vtos); - __ movflt(faddress(n), xmm0); -} - -void TemplateTable::dstore(int n) { - transition(dtos, vtos); - __ movdbl(daddress(n), xmm0); -} - -void TemplateTable::astore(int n) { - transition(vtos, vtos); - __ pop_ptr(rax); - __ movptr(aaddress(n), rax); -} - -void TemplateTable::pop() { - transition(vtos, vtos); - __ addptr(rsp, Interpreter::stackElementSize); -} - -void TemplateTable::pop2() { - transition(vtos, vtos); - __ addptr(rsp, 2 * Interpreter::stackElementSize); -} - -void TemplateTable::dup() { - transition(vtos, vtos); - __ load_ptr(0, rax); - __ push_ptr(rax); - // stack: ..., a, a -} - -void TemplateTable::dup_x1() { - transition(vtos, vtos); - // stack: ..., a, b - __ load_ptr( 0, rax); // load b - __ load_ptr( 1, rcx); // load a - __ store_ptr(1, rax); // store b - __ store_ptr(0, rcx); // store a - __ push_ptr(rax); // push b - // stack: ..., b, a, b -} - -void TemplateTable::dup_x2() { - transition(vtos, vtos); - // stack: ..., a, b, c - __ load_ptr( 0, rax); // load c - __ load_ptr( 2, rcx); // load a - __ store_ptr(2, rax); // store c in a - __ push_ptr(rax); // push c - // stack: ..., c, b, c, c - __ load_ptr( 2, rax); // load b - __ store_ptr(2, rcx); // store a in b - // stack: ..., c, a, c, c - __ store_ptr(1, rax); // store b in c - // stack: ..., c, a, b, c -} - -void TemplateTable::dup2() { - transition(vtos, vtos); - // stack: ..., a, b - __ load_ptr(1, rax); // load a - __ push_ptr(rax); // push a - __ load_ptr(1, rax); // load b - __ push_ptr(rax); // push b - // stack: ..., a, b, a, b -} - -void TemplateTable::dup2_x1() { - transition(vtos, vtos); - // stack: ..., a, b, c - __ load_ptr( 0, rcx); // load c - __ load_ptr( 1, rax); // load b - __ push_ptr(rax); // push b - __ push_ptr(rcx); // push c - // stack: ..., a, b, c, b, c - __ store_ptr(3, rcx); // store c in b - // stack: ..., a, c, c, b, c - __ load_ptr( 4, rcx); // load a - __ store_ptr(2, rcx); // store a in 2nd c - // stack: ..., a, c, a, b, c - __ store_ptr(4, rax); // store b in a - // stack: ..., b, c, a, b, c -} - -void TemplateTable::dup2_x2() { - transition(vtos, vtos); - // stack: ..., a, b, c, d - __ load_ptr( 0, rcx); // load d - __ load_ptr( 1, rax); // load c - __ push_ptr(rax); // push c - __ push_ptr(rcx); // push d - // stack: ..., a, b, c, d, c, d - __ load_ptr( 4, rax); // load b - __ store_ptr(2, rax); // store b in d - __ store_ptr(4, rcx); 
// store d in b - // stack: ..., a, d, c, b, c, d - __ load_ptr( 5, rcx); // load a - __ load_ptr( 3, rax); // load c - __ store_ptr(3, rcx); // store a in c - __ store_ptr(5, rax); // store c in a - // stack: ..., c, d, a, b, c, d -} - -void TemplateTable::swap() { - transition(vtos, vtos); - // stack: ..., a, b - __ load_ptr( 1, rcx); // load a - __ load_ptr( 0, rax); // load b - __ store_ptr(0, rcx); // store a in b - __ store_ptr(1, rax); // store b in a - // stack: ..., b, a -} - -void TemplateTable::iop2(Operation op) { - transition(itos, itos); - switch (op) { - case add : __ pop_i(rdx); __ addl (rax, rdx); break; - case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; - case mul : __ pop_i(rdx); __ imull(rax, rdx); break; - case _and : __ pop_i(rdx); __ andl (rax, rdx); break; - case _or : __ pop_i(rdx); __ orl (rax, rdx); break; - case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break; - case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break; - case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break; - case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break; - default : ShouldNotReachHere(); - } -} - -void TemplateTable::lop2(Operation op) { - transition(ltos, ltos); - switch (op) { - case add : __ pop_l(rdx); __ addptr(rax, rdx); break; - case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break; - case _and : __ pop_l(rdx); __ andptr(rax, rdx); break; - case _or : __ pop_l(rdx); __ orptr (rax, rdx); break; - case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break; - default : ShouldNotReachHere(); - } -} - -void TemplateTable::idiv() { - transition(itos, itos); - __ movl(rcx, rax); - __ pop_i(rax); - // Note: could xor eax and ecx and compare with (-1 ^ min_int). If - // they are not equal, one could do a normal division (no correction - // needed), which may speed up this implementation for the common case. - // (see also JVM spec., p.243 & p.271) - __ corrected_idivl(rcx); -} - -void TemplateTable::irem() { - transition(itos, itos); - __ movl(rcx, rax); - __ pop_i(rax); - // Note: could xor eax and ecx and compare with (-1 ^ min_int). If - // they are not equal, one could do a normal division (no correction - // needed), which may speed up this implementation for the common case. - // (see also JVM spec., p.243 & p.271) - __ corrected_idivl(rcx); - __ movl(rax, rdx); -} - -void TemplateTable::lmul() { - transition(ltos, ltos); - __ pop_l(rdx); - __ imulq(rax, rdx); -} - -void TemplateTable::ldiv() { - transition(ltos, ltos); - __ mov(rcx, rax); - __ pop_l(rax); - // generate explicit div0 check - __ testq(rcx, rcx); - __ jump_cc(Assembler::zero, - ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); - // Note: could xor rax and rcx and compare with (-1 ^ min_int). If - // they are not equal, one could do a normal division (no correction - // needed), which may speed up this implementation for the common case. - // (see also JVM spec., p.243 & p.271) - __ corrected_idivq(rcx); // kills rbx -} - -void TemplateTable::lrem() { - transition(ltos, ltos); - __ mov(rcx, rax); - __ pop_l(rax); - __ testq(rcx, rcx); - __ jump_cc(Assembler::zero, - ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); - // Note: could xor rax and rcx and compare with (-1 ^ min_int). If - // they are not equal, one could do a normal division (no correction - // needed), which may speed up this implementation for the common case. 
- // (see also JVM spec., p.243 & p.271) - __ corrected_idivq(rcx); // kills rbx - __ mov(rax, rdx); -} - -void TemplateTable::lshl() { - transition(itos, ltos); - __ movl(rcx, rax); // get shift count - __ pop_l(rax); // get shift value - __ shlq(rax); -} - -void TemplateTable::lshr() { - transition(itos, ltos); - __ movl(rcx, rax); // get shift count - __ pop_l(rax); // get shift value - __ sarq(rax); -} - -void TemplateTable::lushr() { - transition(itos, ltos); - __ movl(rcx, rax); // get shift count - __ pop_l(rax); // get shift value - __ shrq(rax); -} - -void TemplateTable::fop2(Operation op) { - transition(ftos, ftos); - switch (op) { - case add: - __ addss(xmm0, at_rsp()); - __ addptr(rsp, Interpreter::stackElementSize); - break; - case sub: - __ movflt(xmm1, xmm0); - __ pop_f(xmm0); - __ subss(xmm0, xmm1); - break; - case mul: - __ mulss(xmm0, at_rsp()); - __ addptr(rsp, Interpreter::stackElementSize); - break; - case div: - __ movflt(xmm1, xmm0); - __ pop_f(xmm0); - __ divss(xmm0, xmm1); - break; - case rem: - __ movflt(xmm1, xmm0); - __ pop_f(xmm0); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2); - break; - default: - ShouldNotReachHere(); - break; - } -} - -void TemplateTable::dop2(Operation op) { - transition(dtos, dtos); - switch (op) { - case add: - __ addsd(xmm0, at_rsp()); - __ addptr(rsp, 2 * Interpreter::stackElementSize); - break; - case sub: - __ movdbl(xmm1, xmm0); - __ pop_d(xmm0); - __ subsd(xmm0, xmm1); - break; - case mul: - __ mulsd(xmm0, at_rsp()); - __ addptr(rsp, 2 * Interpreter::stackElementSize); - break; - case div: - __ movdbl(xmm1, xmm0); - __ pop_d(xmm0); - __ divsd(xmm0, xmm1); - break; - case rem: - __ movdbl(xmm1, xmm0); - __ pop_d(xmm0); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2); - break; - default: - ShouldNotReachHere(); - break; - } -} - -void TemplateTable::ineg() { - transition(itos, itos); - __ negl(rax); -} - -void TemplateTable::lneg() { - transition(ltos, ltos); - __ negq(rax); -} - -// Note: 'double' and 'long long' have 32-bits alignment on x86. -static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { - // Use the expression (adr)&(~0xF) to provide 128-bits aligned address - // of 128-bits operands for SSE instructions. - jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF))); - // Store the value to a 128-bits operand. - operand[0] = lo; - operand[1] = hi; - return operand; -} - -// Buffer for 128-bits masks used by SSE instructions. 
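double_quadword() above relies on one trick: masking any address inside a slightly over-sized static pool down to a 16-byte boundary yields an aligned 128-bit slot that xorps/xorpd can use as a memory operand. A standalone sketch of that address arithmetic, using plain int64_t instead of jlong:

    #include <cassert>
    #include <cstdint>

    // Two 128-bit slots; rounding any interior address down to 16 bytes stays inside.
    static int64_t pool[2 * 2];

    static int64_t* aligned_quadword(int64_t* adr, int64_t lo, int64_t hi) {
      int64_t* operand = (int64_t*)(((intptr_t)adr) & ~(intptr_t)0xF);  // round down to 16 bytes
      operand[0] = lo;   // low  64 bits of the 128-bit operand
      operand[1] = hi;   // high 64 bits
      return operand;
    }

    int main() {
      int64_t mask = (int64_t)0x8000000080000000ULL;   // the float sign-flip pattern used below
      int64_t* p = aligned_quadword(&pool[1], mask, mask);
      assert(((intptr_t)p & 0xF) == 0);                // aligned, so usable as an SSE operand
      assert(p >= pool && p + 2 <= pool + 4);          // still within the pool
      return 0;
    }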
-static jlong float_signflip_pool[2*2]; -static jlong double_signflip_pool[2*2]; - -void TemplateTable::fneg() { - transition(ftos, ftos); - static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); - __ xorps(xmm0, ExternalAddress((address) float_signflip)); -} - -void TemplateTable::dneg() { - transition(dtos, dtos); - static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); - __ xorpd(xmm0, ExternalAddress((address) double_signflip)); -} - -void TemplateTable::iinc() { - transition(vtos, vtos); - __ load_signed_byte(rdx, at_bcp(2)); // get constant - locals_index(rbx); - __ addl(iaddress(rbx), rdx); -} - -void TemplateTable::wide_iinc() { - transition(vtos, vtos); - __ movl(rdx, at_bcp(4)); // get constant - locals_index_wide(rbx); - __ bswapl(rdx); // swap bytes & sign-extend constant - __ sarl(rdx, 16); - __ addl(iaddress(rbx), rdx); - // Note: should probably use only one movl to get both - // the index and the constant -> fix this -} - -void TemplateTable::convert() { - // Checking -#ifdef ASSERT - { - TosState tos_in = ilgl; - TosState tos_out = ilgl; - switch (bytecode()) { - case Bytecodes::_i2l: // fall through - case Bytecodes::_i2f: // fall through - case Bytecodes::_i2d: // fall through - case Bytecodes::_i2b: // fall through - case Bytecodes::_i2c: // fall through - case Bytecodes::_i2s: tos_in = itos; break; - case Bytecodes::_l2i: // fall through - case Bytecodes::_l2f: // fall through - case Bytecodes::_l2d: tos_in = ltos; break; - case Bytecodes::_f2i: // fall through - case Bytecodes::_f2l: // fall through - case Bytecodes::_f2d: tos_in = ftos; break; - case Bytecodes::_d2i: // fall through - case Bytecodes::_d2l: // fall through - case Bytecodes::_d2f: tos_in = dtos; break; - default : ShouldNotReachHere(); - } - switch (bytecode()) { - case Bytecodes::_l2i: // fall through - case Bytecodes::_f2i: // fall through - case Bytecodes::_d2i: // fall through - case Bytecodes::_i2b: // fall through - case Bytecodes::_i2c: // fall through - case Bytecodes::_i2s: tos_out = itos; break; - case Bytecodes::_i2l: // fall through - case Bytecodes::_f2l: // fall through - case Bytecodes::_d2l: tos_out = ltos; break; - case Bytecodes::_i2f: // fall through - case Bytecodes::_l2f: // fall through - case Bytecodes::_d2f: tos_out = ftos; break; - case Bytecodes::_i2d: // fall through - case Bytecodes::_l2d: // fall through - case Bytecodes::_f2d: tos_out = dtos; break; - default : ShouldNotReachHere(); - } - transition(tos_in, tos_out); - } -#endif // ASSERT - - static const int64_t is_nan = 0x8000000000000000L; - - // Conversion - switch (bytecode()) { - case Bytecodes::_i2l: - __ movslq(rax, rax); - break; - case Bytecodes::_i2f: - __ cvtsi2ssl(xmm0, rax); - break; - case Bytecodes::_i2d: - __ cvtsi2sdl(xmm0, rax); - break; - case Bytecodes::_i2b: - __ movsbl(rax, rax); - break; - case Bytecodes::_i2c: - __ movzwl(rax, rax); - break; - case Bytecodes::_i2s: - __ movswl(rax, rax); - break; - case Bytecodes::_l2i: - __ movl(rax, rax); - break; - case Bytecodes::_l2f: - __ cvtsi2ssq(xmm0, rax); - break; - case Bytecodes::_l2d: - __ cvtsi2sdq(xmm0, rax); - break; - case Bytecodes::_f2i: - { - Label L; - __ cvttss2sil(rax, xmm0); - __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? 
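The 0x80000000 compare just above is how the generated code detects that cvttss2si produced the "integer indefinite" value for a NaN or out-of-range input; the slow-path runtime call (SharedRuntime::f2i, just below) then has to apply the JVMS f2i rules. A standalone sketch of those rules, assuming the runtime stub follows the spec as the surrounding code implies:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // JVMS f2i: NaN maps to 0, out-of-range values saturate, everything else
    // truncates toward zero.
    static int32_t java_f2i(float x) {
      if (std::isnan(x)) return 0;
      if (x >= (float)std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
      if (x <= (float)std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
      return (int32_t)x;
    }

    int main() {
      assert(java_f2i(std::numeric_limits<float>::quiet_NaN()) == 0);
      assert(java_f2i(1e20f)  == std::numeric_limits<int32_t>::max());
      assert(java_f2i(-1e20f) == std::numeric_limits<int32_t>::min());
      assert(java_f2i(-3.9f)  == -3);
      return 0;
    }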
- __ jcc(Assembler::notEqual, L); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); - __ bind(L); - } - break; - case Bytecodes::_f2l: - { - Label L; - __ cvttss2siq(rax, xmm0); - // NaN or overflow/underflow? - __ cmp64(rax, ExternalAddress((address) &is_nan)); - __ jcc(Assembler::notEqual, L); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); - __ bind(L); - } - break; - case Bytecodes::_f2d: - __ cvtss2sd(xmm0, xmm0); - break; - case Bytecodes::_d2i: - { - Label L; - __ cvttsd2sil(rax, xmm0); - __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? - __ jcc(Assembler::notEqual, L); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); - __ bind(L); - } - break; - case Bytecodes::_d2l: - { - Label L; - __ cvttsd2siq(rax, xmm0); - // NaN or overflow/underflow? - __ cmp64(rax, ExternalAddress((address) &is_nan)); - __ jcc(Assembler::notEqual, L); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); - __ bind(L); - } - break; - case Bytecodes::_d2f: - __ cvtsd2ss(xmm0, xmm0); - break; - default: - ShouldNotReachHere(); - } -} - -void TemplateTable::lcmp() { - transition(ltos, itos); - Label done; - __ pop_l(rdx); - __ cmpq(rdx, rax); - __ movl(rax, -1); - __ jccb(Assembler::less, done); - __ setb(Assembler::notEqual, rax); - __ movzbl(rax, rax); - __ bind(done); -} - -void TemplateTable::float_cmp(bool is_float, int unordered_result) { - Label done; - if (is_float) { - // XXX get rid of pop here, use ... reg, mem32 - __ pop_f(xmm1); - __ ucomiss(xmm1, xmm0); - } else { - // XXX get rid of pop here, use ... reg, mem64 - __ pop_d(xmm1); - __ ucomisd(xmm1, xmm0); - } - if (unordered_result < 0) { - __ movl(rax, -1); - __ jccb(Assembler::parity, done); - __ jccb(Assembler::below, done); - __ setb(Assembler::notEqual, rdx); - __ movzbl(rax, rdx); - } else { - __ movl(rax, 1); - __ jccb(Assembler::parity, done); - __ jccb(Assembler::above, done); - __ movl(rax, 0); - __ jccb(Assembler::equal, done); - __ decrementl(rax); - } - __ bind(done); -} - -void TemplateTable::branch(bool is_jsr, bool is_wide) { - __ get_method(rcx); // rcx holds method - __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx - // holds bumped taken count - - const ByteSize be_offset = MethodCounters::backedge_counter_offset() + - InvocationCounter::counter_offset(); - const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + - InvocationCounter::counter_offset(); - - // Load up edx with the branch displacement - if (is_wide) { - __ movl(rdx, at_bcp(1)); - } else { - __ load_signed_short(rdx, at_bcp(1)); - } - __ bswapl(rdx); - - if (!is_wide) { - __ sarl(rdx, 16); - } - __ movl2ptr(rdx, rdx); - - // Handle all the JSR stuff here, then exit. - // It's much shorter and cleaner than intermingling with the non-JSR - // normal-branch stuff occurring below. - if (is_jsr) { - // Pre-load the next target bytecode into rbx - __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0)); - - // compute return address as bci in rax - __ lea(rax, at_bcp((is_wide ? 
5 : 3) - - in_bytes(ConstMethod::codes_offset()))); - __ subptr(rax, Address(rcx, Method::const_offset())); - // Adjust the bcp in r13 by the displacement in rdx - __ addptr(r13, rdx); - // jsr returns atos that is not an oop - __ push_i(rax); - __ dispatch_only(vtos); - return; - } - - // Normal (non-jsr) branch handling - - // Adjust the bcp in r13 by the displacement in rdx - __ addptr(r13, rdx); - - assert(UseLoopCounter || !UseOnStackReplacement, - "on-stack-replacement requires loop counters"); - Label backedge_counter_overflow; - Label profile_method; - Label dispatch; - if (UseLoopCounter) { - // increment backedge counter for backward branches - // rax: MDO - // ebx: MDO bumped taken-count - // rcx: method - // rdx: target offset - // r13: target bcp - // r14: locals pointer - __ testl(rdx, rdx); // check if forward or backward branch - __ jcc(Assembler::positive, dispatch); // count only if backward branch - - // check if MethodCounters exists - Label has_counters; - __ movptr(rax, Address(rcx, Method::method_counters_offset())); - __ testptr(rax, rax); - __ jcc(Assembler::notZero, has_counters); - __ push(rdx); - __ push(rcx); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), - rcx); - __ pop(rcx); - __ pop(rdx); - __ movptr(rax, Address(rcx, Method::method_counters_offset())); - __ jcc(Assembler::zero, dispatch); - __ bind(has_counters); - - if (TieredCompilation) { - Label no_mdo; - int increment = InvocationCounter::count_increment; - if (ProfileInterpreter) { - // Are we profiling? - __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset()))); - __ testptr(rbx, rbx); - __ jccb(Assembler::zero, no_mdo); - // Increment the MDO backedge counter - const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset())); - __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, - rax, false, Assembler::zero, &backedge_counter_overflow); - __ jmp(dispatch); - } - __ bind(no_mdo); - // Increment backedge counter in MethodCounters* - __ movptr(rcx, Address(rcx, Method::method_counters_offset())); - const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset())); - __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, - rax, false, Assembler::zero, &backedge_counter_overflow); - } else { // not TieredCompilation - // increment counter - __ movptr(rcx, Address(rcx, Method::method_counters_offset())); - __ movl(rax, Address(rcx, be_offset)); // load backedge counter - __ incrementl(rax, InvocationCounter::count_increment); // increment counter - __ movl(Address(rcx, be_offset), rax); // store counter - - __ movl(rax, Address(rcx, inv_offset)); // load invocation counter - - __ andl(rax, InvocationCounter::count_mask_value); // and the status bits - __ addl(rax, Address(rcx, be_offset)); // add both counters - - if (ProfileInterpreter) { - // Test to see if we should create a method data oop - __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset()))); - __ jcc(Assembler::less, dispatch); - - // if no method data exists, go to profile method - __ test_method_data_pointer(rax, profile_method); - - if (UseOnStackReplacement) { - // check for overflow against ebx which is the MDO taken count - __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()))); - __ jcc(Assembler::below, dispatch); - - // When 
ProfileInterpreter is on, the backedge_count comes - // from the MethodData*, which value does not get reset on - // the call to frequency_counter_overflow(). To avoid - // excessive calls to the overflow routine while the method is - // being compiled, add a second test to make sure the overflow - // function is called only once every overflow_frequency. - const int overflow_frequency = 1024; - __ andl(rbx, overflow_frequency - 1); - __ jcc(Assembler::zero, backedge_counter_overflow); - - } - } else { - if (UseOnStackReplacement) { - // check for overflow against eax, which is the sum of the - // counters - __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()))); - __ jcc(Assembler::aboveEqual, backedge_counter_overflow); - - } - } - } - __ bind(dispatch); - } - - // Pre-load the next target bytecode into rbx - __ load_unsigned_byte(rbx, Address(r13, 0)); - - // continue with the bytecode @ target - // eax: return bci for jsr's, unused otherwise - // ebx: target bytecode - // r13: target bcp - __ dispatch_only(vtos); - - if (UseLoopCounter) { - if (ProfileInterpreter) { - // Out-of-line code to allocate method data oop. - __ bind(profile_method); - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); - __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode - __ set_method_data_pointer_for_bcp(); - __ jmp(dispatch); - } - - if (UseOnStackReplacement) { - // invocation counter overflow - __ bind(backedge_counter_overflow); - __ negptr(rdx); - __ addptr(rdx, r13); // branch bcp - // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::frequency_counter_overflow), - rdx); - __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode - - // rax: osr nmethod (osr ok) or NULL (osr not possible) - // ebx: target bytecode - // rdx: scratch - // r14: locals pointer - // r13: bcp - __ testptr(rax, rax); // test result - __ jcc(Assembler::zero, dispatch); // no osr if null - // nmethod may have been invalidated (VM may block upon call_VM return) - __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use); - __ jcc(Assembler::notEqual, dispatch); - - // We have the address of an on stack replacement routine in eax - // We need to prepare to execute the OSR method. First we must - // migrate the locals and monitors off of the stack. - - __ mov(r13, rax); // save the nmethod - - call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); - - // eax is OSR buffer, move it to expected parameter location - __ mov(j_rarg0, rax); - - // We use j_rarg definitions here so that registers don't conflict as parameter - // registers change across platforms as we are in the midst of a calling - // sequence to the OSR nmethod and we don't want collision. These are NOT parameters. - - const Register retaddr = j_rarg2; - const Register sender_sp = j_rarg1; - - // pop the interpreter frame - __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp - __ leave(); // remove frame anchor - __ pop(retaddr); // get return address - __ mov(rsp, sender_sp); // set sp to sender sp - // Ensure compiled code always sees stack at proper alignment - __ andptr(rsp, -(StackAlignmentInBytes)); - - // unlike x86 we need no specialized return from compiled code - // to the interpreter or the call stub. 
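The andptr(rsp, -StackAlignmentInBytes) step in the OSR sequence above is the usual round-down-to-alignment idiom: and'ing with the negated power-of-two alignment clears the low bits. A small standalone sketch; the 16-byte value is the typical x86-64 figure, assumed here only for the example:

    #include <cassert>
    #include <cstdint>

    // Round an address down to an alignment boundary; for powers of two,
    // sp & -alignment and sp & ~(alignment - 1) are the same operation.
    static uintptr_t align_down(uintptr_t sp, uintptr_t alignment) {
      return sp & ~(alignment - 1);
    }

    int main() {
      const uintptr_t kStackAlignment = 16;
      assert(align_down(0x7ffc1237, kStackAlignment) == 0x7ffc1230);
      assert(align_down(0x7ffc1230, kStackAlignment) == 0x7ffc1230);   // already aligned: unchanged
      return 0;
    }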
- - // push the return address - __ push(retaddr); - - // and begin the OSR nmethod - __ jmp(Address(r13, nmethod::osr_entry_point_offset())); - } - } -} - - -void TemplateTable::if_0cmp(Condition cc) { - transition(itos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ testl(rax, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - -void TemplateTable::if_icmp(Condition cc) { - transition(itos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ pop_i(rdx); - __ cmpl(rdx, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - -void TemplateTable::if_nullcmp(Condition cc) { - transition(atos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ testptr(rax, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - -void TemplateTable::if_acmp(Condition cc) { - transition(atos, vtos); - // assume branch is more often taken than not (loops use backward branches) - Label not_taken; - __ pop_ptr(rdx); - __ cmpptr(rdx, rax); - __ jcc(j_not(cc), not_taken); - branch(false, false); - __ bind(not_taken); - __ profile_not_taken_branch(rax); -} - -void TemplateTable::ret() { - transition(vtos, vtos); - locals_index(rbx); - __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp - __ profile_ret(rbx, rcx); - __ get_method(rax); - __ movptr(r13, Address(rax, Method::const_offset())); - __ lea(r13, Address(r13, rbx, Address::times_1, - ConstMethod::codes_offset())); - __ dispatch_next(vtos); -} - -void TemplateTable::wide_ret() { - transition(vtos, vtos); - locals_index_wide(rbx); - __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp - __ profile_ret(rbx, rcx); - __ get_method(rax); - __ movptr(r13, Address(rax, Method::const_offset())); - __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset())); - __ dispatch_next(vtos); -} - -void TemplateTable::tableswitch() { - Label default_case, continue_execution; - transition(itos, vtos); - // align r13 - __ lea(rbx, at_bcp(BytesPerInt)); - __ andptr(rbx, -BytesPerInt); - // load lo & hi - __ movl(rcx, Address(rbx, BytesPerInt)); - __ movl(rdx, Address(rbx, 2 * BytesPerInt)); - __ bswapl(rcx); - __ bswapl(rdx); - // check against lo & hi - __ cmpl(rax, rcx); - __ jcc(Assembler::less, default_case); - __ cmpl(rax, rdx); - __ jcc(Assembler::greater, default_case); - // lookup dispatch offset - __ subl(rax, rcx); - __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); - __ profile_switch_case(rax, rbx, rcx); - // continue execution - __ bind(continue_execution); - __ bswapl(rdx); - __ movl2ptr(rdx, rdx); - __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); - __ addptr(r13, rdx); - __ dispatch_only(vtos); - // handle default - __ bind(default_case); - __ profile_switch_default(rax); - __ movl(rdx, Address(rbx, 0)); - __ jmp(continue_execution); -} - -void TemplateTable::lookupswitch() { - transition(itos, itos); - __ stop("lookupswitch bytecode should have been rewritten"); -} - -void TemplateTable::fast_linearswitch() { - transition(itos, vtos); - Label loop_entry, loop, found, continue_execution; - // bswap rax so we can avoid bswapping the table entries - __ bswapl(rax); - // align r13 - __ lea(rbx, 
at_bcp(BytesPerInt)); // btw: should be able to get rid of - // this instruction (change offsets - // below) - __ andptr(rbx, -BytesPerInt); - // set counter - __ movl(rcx, Address(rbx, BytesPerInt)); - __ bswapl(rcx); - __ jmpb(loop_entry); - // table search - __ bind(loop); - __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt)); - __ jcc(Assembler::equal, found); - __ bind(loop_entry); - __ decrementl(rcx); - __ jcc(Assembler::greaterEqual, loop); - // default case - __ profile_switch_default(rax); - __ movl(rdx, Address(rbx, 0)); - __ jmp(continue_execution); - // entry found -> get offset - __ bind(found); - __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt)); - __ profile_switch_case(rcx, rax, rbx); - // continue execution - __ bind(continue_execution); - __ bswapl(rdx); - __ movl2ptr(rdx, rdx); - __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); - __ addptr(r13, rdx); - __ dispatch_only(vtos); -} - -void TemplateTable::fast_binaryswitch() { - transition(itos, vtos); - // Implementation using the following core algorithm: - // - // int binary_search(int key, LookupswitchPair* array, int n) { - // // Binary search according to "Methodik des Programmierens" by - // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. - // int i = 0; - // int j = n; - // while (i+1 < j) { - // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) - // // with Q: for all i: 0 <= i < n: key < a[i] - // // where a stands for the array and assuming that the (inexisting) - // // element a[n] is infinitely big. - // int h = (i + j) >> 1; - // // i < h < j - // if (key < array[h].fast_match()) { - // j = h; - // } else { - // i = h; - // } - // } - // // R: a[i] <= key < a[i+1] or Q - // // (i.e., if key is within array, i is the correct index) - // return i; - // } - - // Register allocation - const Register key = rax; // already set (tosca) - const Register array = rbx; - const Register i = rcx; - const Register j = rdx; - const Register h = rdi; - const Register temp = rsi; - - // Find array start - __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to - // get rid of this - // instruction (change - // offsets below) - __ andptr(array, -BytesPerInt); - - // Initialize i & j - __ xorl(i, i); // i = 0; - __ movl(j, Address(array, -BytesPerInt)); // j = length(array); - - // Convert j into native byteordering - __ bswapl(j); - - // And start - Label entry; - __ jmp(entry); - - // binary search loop - { - Label loop; - __ bind(loop); - // int h = (i + j) >> 1; - __ leal(h, Address(i, j, Address::times_1)); // h = i + j; - __ sarl(h, 1); // h = (i + j) >> 1; - // if (key < array[h].fast_match()) { - // j = h; - // } else { - // i = h; - // } - // Convert array[h].match to native byte-ordering before compare - __ movl(temp, Address(array, h, Address::times_8)); - __ bswapl(temp); - __ cmpl(key, temp); - // j = h if (key < array[h].fast_match()) - __ cmovl(Assembler::less, j, h); - // i = h if (key >= array[h].fast_match()) - __ cmovl(Assembler::greaterEqual, i, h); - // while (i+1 < j) - __ bind(entry); - __ leal(h, Address(i, 1)); // i+1 - __ cmpl(h, j); // i+1 < j - __ jcc(Assembler::less, loop); - } - - // end of binary search, result index is i (must check again!) 
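The commented algorithm above runs fine as ordinary C++, which makes the invariant and the "must check again" caveat easy to verify: the loop only narrows down to a candidate slot, and the caller still has to compare the match value. A standalone version with the lookupswitch pairs reduced to their match values:

    #include <cassert>

    // Dijkstra/Feijen binary search as described in the comment: returns i such
    // that a[i] <= key < a[i+1], treating the nonexistent a[n] as +infinity.
    static int binary_search(int key, const int* a, int n) {
      int i = 0;
      int j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;   // i < h < j
        if (key < a[h]) {
          j = h;
        } else {
          i = h;
        }
      }
      return i;
    }

    int main() {
      const int matches[] = {10, 20, 30, 40};
      assert(binary_search(20, matches, 4) == 1);   // exact hit
      assert(binary_search(25, matches, 4) == 1);   // no hit: the final compare must reject it
      assert(binary_search(5,  matches, 4) == 0);   // below range: likewise rejected
      return 0;
    }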
-  Label default_case;
-  // Convert array[i].match to native byte-ordering before compare
-  __ movl(temp, Address(array, i, Address::times_8));
-  __ bswapl(temp);
-  __ cmpl(key, temp);
-  __ jcc(Assembler::notEqual, default_case);
-
-  // entry found -> j = offset
-  __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
-  __ profile_switch_case(i, key, array);
-  __ bswapl(j);
-  __ movl2ptr(j, j);
-  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
-  __ addptr(r13, j);
-  __ dispatch_only(vtos);
-
-  // default case -> j = default offset
-  __ bind(default_case);
-  __ profile_switch_default(i);
-  __ movl(j, Address(array, -2 * BytesPerInt));
-  __ bswapl(j);
-  __ movl2ptr(j, j);
-  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
-  __ addptr(r13, j);
-  __ dispatch_only(vtos);
-}
-
-
-void TemplateTable::_return(TosState state) {
-  transition(state, state);
-  assert(_desc->calls_vm(),
-         "inconsistent calls_vm information"); // call in remove_activation
-
-  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
-    assert(state == vtos, "only valid state");
-    __ movptr(c_rarg1, aaddress(0));
-    __ load_klass(rdi, c_rarg1);
-    __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
-    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
-    Label skip_register_finalizer;
-    __ jcc(Assembler::zero, skip_register_finalizer);
-
-    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);
-
-    __ bind(skip_register_finalizer);
-  }
-
-  __ remove_activation(state, r13);
-  __ jmp(r13);
-}
-
-// ----------------------------------------------------------------------------
-// Volatile variables demand their effects be made known to all CPU's
-// in order. Store buffers on most chips allow reads & writes to
-// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
-// without some kind of memory barrier (i.e., it's not sufficient that
-// the interpreter does not reorder volatile references, the hardware
-// also must not reorder them).
-//
-// According to the new Java Memory Model (JMM):
-// (1) All volatiles are serialized wrt to each other. ALSO reads &
-//     writes act as acquire & release, so:
-// (2) A read cannot let unrelated NON-volatile memory refs that
-//     happen after the read float up to before the read. It's OK for
-//     non-volatile memory refs that happen before the volatile read to
-//     float down below it.
-// (3) Similarly, a volatile write cannot let unrelated NON-volatile
-//     memory refs that happen BEFORE the write float down to after the
-//     write. It's OK for non-volatile memory refs that happen after the
-//     volatile write to float up before it.
-//
-// We only put in barriers around volatile refs (they are expensive),
-// not _between_ memory refs (that would require us to track the
-// flavor of the previous memory refs). Requirements (2) and (3)
-// require some barriers before volatile stores and after volatile
-// loads. These nearly cover requirement (1) but miss the
-// volatile-store-volatile-load case. This final case is placed after
-// volatile-stores although it could just as well go before
-// volatile-loads.
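Stated in C++11 terms, the policy in the comment above treats a Java volatile load as an acquire load and a Java volatile store as a release store followed by a full (StoreLoad) fence, which is the one case the acquire/release pair does not cover. A standalone sketch of that mapping, for illustration only; it uses std::atomic, not the interpreter's volatile_barrier() below:

    #include <atomic>

    static std::atomic<int> shared_field(0);

    static void volatile_store(int v) {
      shared_field.store(v, std::memory_order_release);      // rule (3): earlier refs may not sink below
      std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad: the volatile-store/volatile-load case
    }

    static int volatile_load() {
      return shared_field.load(std::memory_order_acquire);   // rule (2): later refs may not hoist above
    }

    int main() {
      volatile_store(42);
      return volatile_load() == 42 ? 0 : 1;
    }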
-void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits - order_constraint) { - // Helper function to insert a is-volatile test and memory barrier - if (os::is_MP()) { // Not needed on single CPU - __ membar(order_constraint); - } -} - -void TemplateTable::resolve_cache_and_index(int byte_no, - Register Rcache, - Register index, - size_t index_size) { - const Register temp = rbx; - assert_different_registers(Rcache, index, temp); - - Label resolved; - assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); - __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size); - __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode? - __ jcc(Assembler::equal, resolved); - - // resolve first time through - address entry; - switch (bytecode()) { - case Bytecodes::_getstatic: - case Bytecodes::_putstatic: - case Bytecodes::_getfield: - case Bytecodes::_putfield: - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); - break; - case Bytecodes::_invokevirtual: - case Bytecodes::_invokespecial: - case Bytecodes::_invokestatic: - case Bytecodes::_invokeinterface: - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); - break; - case Bytecodes::_invokehandle: - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); - break; - case Bytecodes::_invokedynamic: - entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); - break; - default: - fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); - break; - } - __ movl(temp, (int) bytecode()); - __ call_VM(noreg, entry, temp); - - // Update registers with resolved info - __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); - __ bind(resolved); -} - -// The cache and index registers must be set before call -void TemplateTable::load_field_cp_cache_entry(Register obj, - Register cache, - Register index, - Register off, - Register flags, - bool is_static = false) { - assert_different_registers(cache, index, flags, off); - - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - // Field offset - __ movptr(off, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f2_offset()))); - // Flags - __ movl(flags, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - - // klass overwrite register - if (is_static) { - __ movptr(obj, Address(cache, index, Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::f1_offset()))); - const int mirror_offset = in_bytes(Klass::java_mirror_offset()); - __ movptr(obj, Address(obj, mirror_offset)); - } -} - -void TemplateTable::load_invoke_cp_cache_entry(int byte_no, - Register method, - Register itable_index, - Register flags, - bool is_invokevirtual, - bool is_invokevfinal, /*unused*/ - bool is_invokedynamic) { - // setup registers - const Register cache = rcx; - const Register index = rdx; - assert_different_registers(method, flags); - assert_different_registers(method, cache, index); - assert_different_registers(itable_index, flags); - assert_different_registers(itable_index, cache, index); - // determine constant pool cache field offsets - assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); - const int method_offset = in_bytes( - ConstantPoolCache::base_offset() + - ((byte_no == f2_byte) - ? 
ConstantPoolCacheEntry::f2_offset() - : ConstantPoolCacheEntry::f1_offset())); - const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()); - // access constant pool cache fields - const int index_offset = in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()); - - size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2)); - resolve_cache_and_index(byte_no, cache, index, index_size); - __ movptr(method, Address(cache, index, Address::times_ptr, method_offset)); - - if (itable_index != noreg) { - // pick up itable or appendix index from f2 also: - __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset)); - } - __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset)); -} - -// Correct values of the cache and index registers are preserved. -void TemplateTable::jvmti_post_field_access(Register cache, Register index, - bool is_static, bool has_tos) { - // do the JVMTI work here to avoid disturbing the register state below - // We use c_rarg registers here because we want to use the register used in - // the call to the VM - if (JvmtiExport::can_post_field_access()) { - // Check to see if a field access watch has been set before we - // take the time to call into the VM. - Label L1; - assert_different_registers(cache, index, rax); - __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); - __ testl(rax, rax); - __ jcc(Assembler::zero, L1); - - __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); - - // cache entry pointer - __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset())); - __ shll(c_rarg3, LogBytesPerWord); - __ addptr(c_rarg2, c_rarg3); - if (is_static) { - __ xorl(c_rarg1, c_rarg1); // NULL object reference - } else { - __ movptr(c_rarg1, at_tos()); // get object pointer without popping it - __ verify_oop(c_rarg1); - } - // c_rarg1: object pointer or NULL - // c_rarg2: cache entry pointer - // c_rarg3: jvalue object on the stack - __ call_VM(noreg, CAST_FROM_FN_PTR(address, - InterpreterRuntime::post_field_access), - c_rarg1, c_rarg2, c_rarg3); - __ get_cache_and_index_at_bcp(cache, index, 1); - __ bind(L1); - } -} - -void TemplateTable::pop_and_check_object(Register r) { - __ pop_ptr(r); - __ null_check(r); // for field access must check obj. 
- __ verify_oop(r); -} - -void TemplateTable::getfield_or_static(int byte_no, bool is_static) { - transition(vtos, vtos); - - const Register cache = rcx; - const Register index = rdx; - const Register obj = c_rarg3; - const Register off = rbx; - const Register flags = rax; - const Register bc = c_rarg3; // uses same reg as obj, so don't mix them - - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); - jvmti_post_field_access(cache, index, is_static, false); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); - - if (!is_static) { - // obj is on the stack - pop_and_check_object(obj); - } - - const Address field(obj, off, Address::times_1); - - Label Done, notByte, notInt, notShort, notChar, - notLong, notFloat, notObj, notDouble; - - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - // Make sure we don't need to mask edx after the above shift - assert(btos == 0, "change code, btos != 0"); - - __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - // btos - __ load_signed_byte(rax, field); - __ push(btos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notByte); - __ cmpl(flags, atos); - __ jcc(Assembler::notEqual, notObj); - // atos - __ load_heap_oop(rax, field); - __ push(atos); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notObj); - __ cmpl(flags, itos); - __ jcc(Assembler::notEqual, notInt); - // itos - __ movl(rax, field); - __ push(itos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notInt); - __ cmpl(flags, ctos); - __ jcc(Assembler::notEqual, notChar); - // ctos - __ load_unsigned_short(rax, field); - __ push(ctos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notChar); - __ cmpl(flags, stos); - __ jcc(Assembler::notEqual, notShort); - // stos - __ load_signed_short(rax, field); - __ push(stos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notShort); - __ cmpl(flags, ltos); - __ jcc(Assembler::notEqual, notLong); - // ltos - __ movq(rax, field); - __ push(ltos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notLong); - __ cmpl(flags, ftos); - __ jcc(Assembler::notEqual, notFloat); - // ftos - __ movflt(xmm0, field); - __ push(ftos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx); - } - __ jmp(Done); - - __ bind(notFloat); -#ifdef ASSERT - __ cmpl(flags, dtos); - __ jcc(Assembler::notEqual, notDouble); -#endif - // dtos - __ movdbl(xmm0, field); - __ push(dtos); - // Rewrite bytecode to be faster - if (!is_static) { - patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx); - } -#ifdef ASSERT - __ jmp(Done); - - __ bind(notDouble); - __ stop("Bad state"); -#endif - - __ bind(Done); - // [jk] not needed currently - // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad | - // Assembler::LoadStore)); -} - - -void TemplateTable::getfield(int byte_no) { - getfield_or_static(byte_no, false); -} - -void TemplateTable::getstatic(int byte_no) { - getfield_or_static(byte_no, true); -} - -// The registers cache 
and index expected to be set before call. -// The function may destroy various registers, just not the cache and index registers. -void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { - transition(vtos, vtos); - - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - - if (JvmtiExport::can_post_field_modification()) { - // Check to see if a field modification watch has been set before - // we take the time to call into the VM. - Label L1; - assert_different_registers(cache, index, rax); - __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); - __ testl(rax, rax); - __ jcc(Assembler::zero, L1); - - __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1); - - if (is_static) { - // Life is simple. Null out the object pointer. - __ xorl(c_rarg1, c_rarg1); - } else { - // Life is harder. The stack holds the value on top, followed by - // the object. We don't know the size of the value, though; it - // could be one or two words depending on its type. As a result, - // we must find the type to determine where the object is. - __ movl(c_rarg3, Address(c_rarg2, rscratch1, - Address::times_8, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift); - // Make sure we don't need to mask rcx after the above shift - ConstantPoolCacheEntry::verify_tos_state_shift(); - __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue - __ cmpl(c_rarg3, ltos); - __ cmovptr(Assembler::equal, - c_rarg1, at_tos_p2()); // ltos (two word jvalue) - __ cmpl(c_rarg3, dtos); - __ cmovptr(Assembler::equal, - c_rarg1, at_tos_p2()); // dtos (two word jvalue) - } - // cache entry pointer - __ addptr(c_rarg2, in_bytes(cp_base_offset)); - __ shll(rscratch1, LogBytesPerWord); - __ addptr(c_rarg2, rscratch1); - // object (tos) - __ mov(c_rarg3, rsp); - // c_rarg1: object pointer set up above (NULL if static) - // c_rarg2: cache entry pointer - // c_rarg3: jvalue object on the stack - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::post_field_modification), - c_rarg1, c_rarg2, c_rarg3); - __ get_cache_and_index_at_bcp(cache, index, 1); - __ bind(L1); - } -} - -void TemplateTable::putfield_or_static(int byte_no, bool is_static) { - transition(vtos, vtos); - - const Register cache = rcx; - const Register index = rdx; - const Register obj = rcx; - const Register off = rbx; - const Register flags = rax; - const Register bc = c_rarg3; - - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); - jvmti_post_field_mod(cache, index, is_static); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); - - // [jk] not needed currently - // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | - // Assembler::StoreStore)); - - Label notVolatile, Done; - __ movl(rdx, flags); - __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - - // field address - const Address field(obj, off, Address::times_1); - - Label notByte, notInt, notShort, notChar, - notLong, notFloat, notObj, notDouble; - - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - - assert(btos == 0, "change code, btos != 0"); - __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - __ jcc(Assembler::notZero, notByte); - - // btos - { - __ pop(btos); - if (!is_static) pop_and_check_object(obj); - __ movb(field, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - 
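Both the JVMTI helper and the putfield prologue above pull two facts out of the same cp-cache flags word: the value's tos state (in the top bits) and the volatile bit. A standalone sketch of that decoding; the shift amounts and the ltos encoding are placeholders for this illustration, not the real ConstantPoolCacheEntry layout:

    #include <cassert>

    // Placeholder field positions; the real ones are defined by ConstantPoolCacheEntry.
    static const int      tos_state_shift   = 28;
    static const unsigned tos_state_mask    = 0xF;
    static const int      is_volatile_shift = 21;

    static unsigned tos_state(unsigned flags)   { return (flags >> tos_state_shift) & tos_state_mask; }
    static bool     is_volatile(unsigned flags) { return ((flags >> is_volatile_shift) & 0x1) != 0; }

    int main() {
      const unsigned ltos = 9;  // placeholder encoding for a two-word (long) value
      unsigned flags = (ltos << tos_state_shift) | (1u << is_volatile_shift);
      assert(tos_state(flags) == ltos);   // selects the two-word jvalue / ltos path
      assert(is_volatile(flags));         // means a StoreLoad barrier follows the store
      return 0;
    }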
__ bind(notByte); - __ cmpl(flags, atos); - __ jcc(Assembler::notEqual, notObj); - - // atos - { - __ pop(atos); - if (!is_static) pop_and_check_object(obj); - // Store into the field - do_oop_store(_masm, field, rax, _bs->kind(), false); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notObj); - __ cmpl(flags, itos); - __ jcc(Assembler::notEqual, notInt); - - // itos - { - __ pop(itos); - if (!is_static) pop_and_check_object(obj); - __ movl(field, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notInt); - __ cmpl(flags, ctos); - __ jcc(Assembler::notEqual, notChar); - - // ctos - { - __ pop(ctos); - if (!is_static) pop_and_check_object(obj); - __ movw(field, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notChar); - __ cmpl(flags, stos); - __ jcc(Assembler::notEqual, notShort); - - // stos - { - __ pop(stos); - if (!is_static) pop_and_check_object(obj); - __ movw(field, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notShort); - __ cmpl(flags, ltos); - __ jcc(Assembler::notEqual, notLong); - - // ltos - { - __ pop(ltos); - if (!is_static) pop_and_check_object(obj); - __ movq(field, rax); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notLong); - __ cmpl(flags, ftos); - __ jcc(Assembler::notEqual, notFloat); - - // ftos - { - __ pop(ftos); - if (!is_static) pop_and_check_object(obj); - __ movflt(field, xmm0); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no); - } - __ jmp(Done); - } - - __ bind(notFloat); -#ifdef ASSERT - __ cmpl(flags, dtos); - __ jcc(Assembler::notEqual, notDouble); -#endif - - // dtos - { - __ pop(dtos); - if (!is_static) pop_and_check_object(obj); - __ movdbl(field, xmm0); - if (!is_static) { - patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no); - } - } - -#ifdef ASSERT - __ jmp(Done); - - __ bind(notDouble); - __ stop("Bad state"); -#endif - - __ bind(Done); - - // Check for volatile store - __ testl(rdx, rdx); - __ jcc(Assembler::zero, notVolatile); - volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | - Assembler::StoreStore)); - __ bind(notVolatile); -} - -void TemplateTable::putfield(int byte_no) { - putfield_or_static(byte_no, false); -} - -void TemplateTable::putstatic(int byte_no) { - putfield_or_static(byte_no, true); -} - -void TemplateTable::jvmti_post_fast_field_mod() { - if (JvmtiExport::can_post_field_modification()) { - // Check to see if a field modification watch has been set before - // we take the time to call into the VM. - Label L2; - __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); - __ testl(c_rarg3, c_rarg3); - __ jcc(Assembler::zero, L2); - __ pop_ptr(rbx); // copy the object pointer from tos - __ verify_oop(rbx); - __ push_ptr(rbx); // put the object pointer back on tos - // Save tos values before call_VM() clobbers them. Since we have - // to do it for every data type, we use the saved values as the - // jvalue object. 
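The trick described in the comment above is that the value sitting on the expression stack already has the layout of a jvalue slot, so pushing it per type and handing rsp to the callback gives JVMTI its jvalue pointer with no extra copying. A minimal standalone illustration with a simplified union, not JNI's actual jvalue:

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for a jvalue: every field type is viewed through one slot.
    union ValueSlot {
      int32_t i;   // byte/short/char/int
      int64_t j;   // long
      float   f;
      double  d;
      void*   l;   // object
    };

    int main() {
      ValueSlot slot;
      slot.i = -42;                    // what push_i(rax) leaves in the stack slot
      ValueSlot* jvalue_ptr = &slot;   // c_rarg3 = rsp: the callback reads it in place
      assert(jvalue_ptr->i == -42);
      return 0;
    }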
- switch (bytecode()) { // load values into the jvalue object - case Bytecodes::_fast_aputfield: __ push_ptr(rax); break; - case Bytecodes::_fast_bputfield: // fall through - case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: // fall through - case Bytecodes::_fast_iputfield: __ push_i(rax); break; - case Bytecodes::_fast_dputfield: __ push_d(); break; - case Bytecodes::_fast_fputfield: __ push_f(); break; - case Bytecodes::_fast_lputfield: __ push_l(rax); break; - - default: - ShouldNotReachHere(); - } - __ mov(c_rarg3, rsp); // points to jvalue on the stack - // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); - __ verify_oop(rbx); - // rbx: object pointer copied above - // c_rarg2: cache entry pointer - // c_rarg3: jvalue object on the stack - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::post_field_modification), - rbx, c_rarg2, c_rarg3); - - switch (bytecode()) { // restore tos values - case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break; - case Bytecodes::_fast_bputfield: // fall through - case Bytecodes::_fast_sputfield: // fall through - case Bytecodes::_fast_cputfield: // fall through - case Bytecodes::_fast_iputfield: __ pop_i(rax); break; - case Bytecodes::_fast_dputfield: __ pop_d(); break; - case Bytecodes::_fast_fputfield: __ pop_f(); break; - case Bytecodes::_fast_lputfield: __ pop_l(rax); break; - } - __ bind(L2); - } -} - -void TemplateTable::fast_storefield(TosState state) { - transition(state, vtos); - - ByteSize base = ConstantPoolCache::base_offset(); - - jvmti_post_fast_field_mod(); - - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rbx, 1); - - // test for volatile with rdx - __ movl(rdx, Address(rcx, rbx, Address::times_8, - in_bytes(base + - ConstantPoolCacheEntry::flags_offset()))); - - // replace index with field offset from cache entry - __ movptr(rbx, Address(rcx, rbx, Address::times_8, - in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); - - // [jk] not needed currently - // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | - // Assembler::StoreStore)); - - Label notVolatile; - __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); - - // Get object from stack - pop_and_check_object(rcx); - - // field address - const Address field(rcx, rbx, Address::times_1); - - // access field - switch (bytecode()) { - case Bytecodes::_fast_aputfield: - do_oop_store(_masm, field, rax, _bs->kind(), false); - break; - case Bytecodes::_fast_lputfield: - __ movq(field, rax); - break; - case Bytecodes::_fast_iputfield: - __ movl(field, rax); - break; - case Bytecodes::_fast_bputfield: - __ movb(field, rax); - break; - case Bytecodes::_fast_sputfield: - // fall through - case Bytecodes::_fast_cputfield: - __ movw(field, rax); - break; - case Bytecodes::_fast_fputfield: - __ movflt(field, xmm0); - break; - case Bytecodes::_fast_dputfield: - __ movdbl(field, xmm0); - break; - default: - ShouldNotReachHere(); - } - - // Check for volatile store - __ testl(rdx, rdx); - __ jcc(Assembler::zero, notVolatile); - volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | - Assembler::StoreStore)); - __ bind(notVolatile); -} - - -void TemplateTable::fast_accessfield(TosState state) { - transition(atos, state); - - // Do the JVMTI work here to avoid disturbing the register state below - if (JvmtiExport::can_post_field_access()) { - // Check to see if a field access watch has been set before we - // take the time to call 
into the VM. - Label L1; - __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); - __ testl(rcx, rcx); - __ jcc(Assembler::zero, L1); - // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); - __ verify_oop(rax); - __ push_ptr(rax); // save object pointer before call_VM() clobbers it - __ mov(c_rarg1, rax); - // c_rarg1: object pointer copied above - // c_rarg2: cache entry pointer - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::post_field_access), - c_rarg1, c_rarg2); - __ pop_ptr(rax); // restore object pointer - __ bind(L1); - } - - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rbx, 1); - // replace index with field offset from cache entry - // [jk] not needed currently - // if (os::is_MP()) { - // __ movl(rdx, Address(rcx, rbx, Address::times_8, - // in_bytes(ConstantPoolCache::base_offset() + - // ConstantPoolCacheEntry::flags_offset()))); - // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - // __ andl(rdx, 0x1); - // } - __ movptr(rbx, Address(rcx, rbx, Address::times_8, - in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); - - // rax: object - __ verify_oop(rax); - __ null_check(rax); - Address field(rax, rbx, Address::times_1); - - // access field - switch (bytecode()) { - case Bytecodes::_fast_agetfield: - __ load_heap_oop(rax, field); - __ verify_oop(rax); - break; - case Bytecodes::_fast_lgetfield: - __ movq(rax, field); - break; - case Bytecodes::_fast_igetfield: - __ movl(rax, field); - break; - case Bytecodes::_fast_bgetfield: - __ movsbl(rax, field); - break; - case Bytecodes::_fast_sgetfield: - __ load_signed_short(rax, field); - break; - case Bytecodes::_fast_cgetfield: - __ load_unsigned_short(rax, field); - break; - case Bytecodes::_fast_fgetfield: - __ movflt(xmm0, field); - break; - case Bytecodes::_fast_dgetfield: - __ movdbl(xmm0, field); - break; - default: - ShouldNotReachHere(); - } - // [jk] not needed currently - // if (os::is_MP()) { - // Label notVolatile; - // __ testl(rdx, rdx); - // __ jcc(Assembler::zero, notVolatile); - // __ membar(Assembler::LoadLoad); - // __ bind(notVolatile); - //}; -} - -void TemplateTable::fast_xaccess(TosState state) { - transition(vtos, state); - - // get receiver - __ movptr(rax, aaddress(0)); - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rdx, 2); - __ movptr(rbx, - Address(rcx, rdx, Address::times_8, - in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); - // make sure exception is reported in correct bcp range (getfield is - // next instruction) - __ increment(r13); - __ null_check(rax); - switch (state) { - case itos: - __ movl(rax, Address(rax, rbx, Address::times_1)); - break; - case atos: - __ load_heap_oop(rax, Address(rax, rbx, Address::times_1)); - __ verify_oop(rax); - break; - case ftos: - __ movflt(xmm0, Address(rax, rbx, Address::times_1)); - break; - default: - ShouldNotReachHere(); - } - - // [jk] not needed currently - // if (os::is_MP()) { - // Label notVolatile; - // __ movl(rdx, Address(rcx, rdx, Address::times_8, - // in_bytes(ConstantPoolCache::base_offset() + - // ConstantPoolCacheEntry::flags_offset()))); - // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - // __ testl(rdx, 0x1); - // __ jcc(Assembler::zero, notVolatile); - // __ membar(Assembler::LoadLoad); - // __ bind(notVolatile); - // } - - __ decrement(r13); -} - - - 
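One detail worth pulling out of the fast_accessfield switch above: byte and short loads sign-extend (movsbl, load_signed_short) while char loads zero-extend (load_unsigned_short), because Java's byte and short are signed and char is an unsigned 16-bit type. A standalone check of that distinction:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t  raw_byte  = 0xFF;     // bit patterns as stored in the heap
      uint16_t raw_short = 0xFFFF;

      int32_t as_java_byte  = (int8_t)raw_byte;    // sign-extend  -> -1
      int32_t as_java_short = (int16_t)raw_short;  // sign-extend  -> -1
      int32_t as_java_char  = raw_short;           // zero-extend  -> 65535

      assert(as_java_byte  == -1);
      assert(as_java_short == -1);
      assert(as_java_char  == 65535);
      return 0;
    }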
-//----------------------------------------------------------------------------- -// Calls - -void TemplateTable::count_calls(Register method, Register temp) { - // implemented elsewhere - ShouldNotReachHere(); -} - -void TemplateTable::prepare_invoke(int byte_no, - Register method, // linked method (or i-klass) - Register index, // itable index, MethodType, etc. - Register recv, // if caller wants to see it - Register flags // if caller wants to test it - ) { - // determine flags - const Bytecodes::Code code = bytecode(); - const bool is_invokeinterface = code == Bytecodes::_invokeinterface; - const bool is_invokedynamic = code == Bytecodes::_invokedynamic; - const bool is_invokehandle = code == Bytecodes::_invokehandle; - const bool is_invokevirtual = code == Bytecodes::_invokevirtual; - const bool is_invokespecial = code == Bytecodes::_invokespecial; - const bool load_receiver = (recv != noreg); - const bool save_flags = (flags != noreg); - assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); - assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); - assert(flags == noreg || flags == rdx, ""); - assert(recv == noreg || recv == rcx, ""); - - // setup registers & access constant pool cache - if (recv == noreg) recv = rcx; - if (flags == noreg) flags = rdx; - assert_different_registers(method, index, recv, flags); - - // save 'interpreter return address' - __ save_bcp(); - - load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); - - // maybe push appendix to arguments (just before return address) - if (is_invokedynamic || is_invokehandle) { - Label L_no_push; - __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); - __ jcc(Assembler::zero, L_no_push); - // Push the appendix as a trailing parameter. - // This must be done before we get the receiver, - // since the parameter_size includes it. - __ push(rbx); - __ mov(rbx, index); - assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); - __ load_resolved_reference_at_index(index, rbx); - __ pop(rbx); - __ push(index); // push appendix (MethodType, CallSite, etc.) - __ bind(L_no_push); - } - - // load receiver if needed (after appendix is pushed so parameter size is correct) - // Note: no return address pushed yet - if (load_receiver) { - __ movl(recv, flags); - __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask); - const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address - const int receiver_is_at_end = -1; // back off one slot to get receiver - Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); - __ movptr(recv, recv_addr); - __ verify_oop(recv); - } - - if (save_flags) { - __ movl(r13, flags); - } - - // compute return type - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - // Make sure we don't need to mask flags after the above shift - ConstantPoolCacheEntry::verify_tos_state_shift(); - // load return address - { - const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); - ExternalAddress table(table_addr); - __ lea(rscratch1, table); - __ movptr(flags, Address(rscratch1, flags, Address::times_ptr)); - } - - // push return address - __ push(flags); - - // Restore flags value from the constant pool cache, and restore rsi - // for later null checks. 
r13 is the bytecode pointer - if (save_flags) { - __ movl(flags, r13); - __ restore_bcp(); - } -} - - -void TemplateTable::invokevirtual_helper(Register index, - Register recv, - Register flags) { - // Uses temporary registers rax, rdx - assert_different_registers(index, recv, rax, rdx); - assert(index == rbx, ""); - assert(recv == rcx, ""); - - // Test for an invoke of a final method - Label notFinal; - __ movl(rax, flags); - __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); - __ jcc(Assembler::zero, notFinal); - - const Register method = index; // method must be rbx - assert(method == rbx, - "Method* must be rbx for interpreter calling convention"); - - // do the call - the index is actually the method to call - // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method* - - // It's final, need a null check here! - __ null_check(recv); - - // profile this call - __ profile_final_call(rax); - __ profile_arguments_type(rax, method, r13, true); - - __ jump_from_interpreted(method, rax); - - __ bind(notFinal); - - // get receiver klass - __ null_check(recv, oopDesc::klass_offset_in_bytes()); - __ load_klass(rax, recv); - - // profile this call - __ profile_virtual_call(rax, r14, rdx); - - // get target Method* & entry point - __ lookup_virtual_method(rax, index, method); - __ profile_arguments_type(rdx, method, r13, true); - __ jump_from_interpreted(method, rdx); -} - - -void TemplateTable::invokevirtual(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f2_byte, "use this argument"); - prepare_invoke(byte_no, - rbx, // method or vtable index - noreg, // unused itable index - rcx, rdx); // recv, flags - - // rbx: index - // rcx: receiver - // rdx: flags - - invokevirtual_helper(rbx, rcx, rdx); -} - - -void TemplateTable::invokespecial(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, rbx, noreg, // get f1 Method* - rcx); // get receiver also for null check - __ verify_oop(rcx); - __ null_check(rcx); - // do the call - __ profile_call(rax); - __ profile_arguments_type(rax, rbx, r13, false); - __ jump_from_interpreted(rbx, rax); -} - - -void TemplateTable::invokestatic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, rbx); // get f1 Method* - // do the call - __ profile_call(rax); - __ profile_arguments_type(rax, rbx, r13, false); - __ jump_from_interpreted(rbx, rax); -} - -void TemplateTable::fast_invokevfinal(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f2_byte, "use this argument"); - __ stop("fast_invokevfinal not used on amd64"); -} - -void TemplateTable::invokeinterface(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index - rcx, rdx); // recv, flags - - // rax: interface klass (from f1) - // rbx: itable index (from f2) - // rcx: receiver - // rdx: flags - - // Special case of invokeinterface called for virtual method of - // java.lang.Object. See cpCacheOop.cpp for details. - // This code isn't produced by javac, but could be produced by - // another compliant java compiler. 
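invokevirtual_helper above branches on the is_vfinal bit: for a final method the cache entry's f2 already is the Method*, otherwise f2 is a vtable index into the receiver's klass (the invokeinterface special case that continues below routes into the same helper). A minimal sketch of that dispatch decision, assuming invented stand-ins (Method, Klass, Receiver, CpCacheEntry) rather than the real VM types:

#include <cstdio>
#include <vector>

struct Method { const char* name; };

struct Klass {
  std::vector<const Method*> vtable;   // virtual dispatch table
};

struct Receiver { const Klass* klass; };

struct CpCacheEntry {
  bool is_vfinal;           // final method: f2 holds the Method* directly
  const Method* f2_method;  // valid when is_vfinal
  int f2_vtable_index;      // valid when !is_vfinal
};

// Mirrors the two branches of invokevirtual_helper above.
static const Method* resolve_virtual(const Receiver* recv, const CpCacheEntry& e) {
  if (e.is_vfinal) {
    return e.f2_method;                           // no vtable load needed
  }
  return recv->klass->vtable[e.f2_vtable_index];  // lookup_virtual_method
}

int main() {
  Method m{"Object.toString"};
  Klass k{{ &m }};
  Receiver r{ &k };
  CpCacheEntry e{false, nullptr, 0};
  std::printf("%s\n", resolve_virtual(&r, e)->name);
  return 0;
}

Keeping both cases behind one cache entry is what lets the interpreter skip the vtable load entirely for final methods.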
- Label notMethod; - __ movl(r14, rdx); - __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); - __ jcc(Assembler::zero, notMethod); - - invokevirtual_helper(rbx, rcx, rdx); - __ bind(notMethod); - - // Get receiver klass into rdx - also a null check - __ restore_locals(); // restore r14 - __ null_check(rcx, oopDesc::klass_offset_in_bytes()); - __ load_klass(rdx, rcx); - - // profile this call - __ profile_virtual_call(rdx, r13, r14); - - Label no_such_interface, no_such_method; - - __ lookup_interface_method(// inputs: rec. class, interface, itable index - rdx, rax, rbx, - // outputs: method, scan temp. reg - rbx, r13, - no_such_interface); - - // rbx: Method* to call - // rcx: receiver - // Check for abstract method error - // Note: This should be done more efficiently via a throw_abstract_method_error - // interpreter entry point and a conditional jump to it in case of a null - // method. - __ testptr(rbx, rbx); - __ jcc(Assembler::zero, no_such_method); - - __ profile_arguments_type(rdx, rbx, r13, true); - - // do the call - // rcx: receiver - // rbx,: Method* - __ jump_from_interpreted(rbx, rdx); - __ should_not_reach_here(); - - // exception handling code follows... - // note: must restore interpreter registers to canonical - // state for exception handling to work correctly! - - __ bind(no_such_method); - // throw exception - __ pop(rbx); // pop return address (pushed by prepare_invoke) - __ restore_bcp(); // r13 must be correct for exception handler (was destroyed) - __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); - // the call_VM checks for exception, so we should never return here. - __ should_not_reach_here(); - - __ bind(no_such_interface); - // throw exception - __ pop(rbx); // pop return address (pushed by prepare_invoke) - __ restore_bcp(); // r13 must be correct for exception handler (was destroyed) - __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) - __ call_VM(noreg, CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_IncompatibleClassChangeError)); - // the call_VM checks for exception, so we should never return here. 
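The invokeinterface path above resolves the target by scanning the receiver's itable and then falls through to no_such_interface or no_such_method when the scan or the method slot comes up empty. A hedged, simplified C++ sketch of that lookup (the ItableEntry layout below is invented; the real lookup_interface_method walks klass metadata):

#include <cstdio>
#include <stdexcept>
#include <vector>

struct Method { const char* name; };
struct InterfaceKlass { const char* name; };

// Hypothetical itable entry: an implemented interface plus its block of
// method pointers, indexed by the itable index from f2.
struct ItableEntry {
  const InterfaceKlass* interface_klass;
  std::vector<const Method*> methods;
};

struct Klass {
  std::vector<ItableEntry> itable;
};

// Scan for the interface, then pick the method at the itable index; a
// missing interface or a null method slot correspond to the
// no_such_interface / no_such_method exits in the hunk above.
static const Method* lookup_interface_method(const Klass* recv_klass,
                                             const InterfaceKlass* iface,
                                             int itable_index) {
  for (const ItableEntry& e : recv_klass->itable) {
    if (e.interface_klass == iface) {
      const Method* m = e.methods.at(itable_index);
      if (m == nullptr) throw std::runtime_error("AbstractMethodError");
      return m;
    }
  }
  throw std::runtime_error("IncompatibleClassChangeError");
}

int main() {
  InterfaceKlass icmp{"Comparable"};
  Method cmp{"compareTo"};
  ItableEntry entry{&icmp, {&cmp}};
  Klass k;
  k.itable.push_back(entry);
  std::printf("%s\n", lookup_interface_method(&k, &icmp, 0)->name);
  return 0;
}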
- __ should_not_reach_here(); -} - - -void TemplateTable::invokehandle(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - const Register rbx_method = rbx; - const Register rax_mtype = rax; - const Register rcx_recv = rcx; - const Register rdx_flags = rdx; - - prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv); - __ verify_method_ptr(rbx_method); - __ verify_oop(rcx_recv); - __ null_check(rcx_recv); - - // rax: MethodType object (from cpool->resolved_references[f1], if necessary) - // rbx: MH.invokeExact_MT method (from f2) - - // Note: rax_mtype is already pushed (if necessary) by prepare_invoke - - // FIXME: profile the LambdaForm also - __ profile_final_call(rax); - __ profile_arguments_type(rdx, rbx_method, r13, true); - - __ jump_from_interpreted(rbx_method, rdx); -} - - -void TemplateTable::invokedynamic(int byte_no) { - transition(vtos, vtos); - assert(byte_no == f1_byte, "use this argument"); - - const Register rbx_method = rbx; - const Register rax_callsite = rax; - - prepare_invoke(byte_no, rbx_method, rax_callsite); - - // rax: CallSite object (from cpool->resolved_references[f1]) - // rbx: MH.linkToCallSite method (from f2) - - // Note: rax_callsite is already pushed by prepare_invoke - - // %%% should make a type profile for any invokedynamic that takes a ref argument - // profile this call - __ profile_call(r13); - __ profile_arguments_type(rdx, rbx_method, r13, false); - - __ verify_oop(rax_callsite); - - __ jump_from_interpreted(rbx_method, rdx); -} - - -//----------------------------------------------------------------------------- -// Allocation - -void TemplateTable::_new() { - transition(vtos, atos); - __ get_unsigned_2_byte_index_at_bcp(rdx, 1); - Label slow_case; - Label done; - Label initialize_header; - Label initialize_object; // including clearing the fields - Label allocate_shared; - - __ get_cpool_and_tags(rsi, rax); - // Make sure the class we're about to instantiate has been resolved. - // This is done before loading InstanceKlass to be consistent with the order - // how Constant Pool is updated (see ConstantPool::klass_at_put) - const int tags_offset = Array::base_offset_in_bytes(); - __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), - JVM_CONSTANT_Class); - __ jcc(Assembler::notEqual, slow_case); - - // get InstanceKlass - __ movptr(rsi, Address(rsi, rdx, - Address::times_8, sizeof(ConstantPool))); - - // make sure klass is initialized & doesn't have finalizer - // make sure klass is fully initialized - __ cmpb(Address(rsi, - InstanceKlass::init_state_offset()), - InstanceKlass::fully_initialized); - __ jcc(Assembler::notEqual, slow_case); - - // get instance_size in InstanceKlass (scaled to a count of bytes) - __ movl(rdx, - Address(rsi, - Klass::layout_helper_offset())); - // test to see if it has a finalizer or is malformed in some way - __ testl(rdx, Klass::_lh_instance_slow_path_bit); - __ jcc(Assembler::notZero, slow_case); - - // Allocate the instance - // 1) Try to allocate in the TLAB - // 2) if fail and the object is large allocate in the shared Eden - // 3) if the above fails (or is not applicable), go to a slow case - // (creates a new TLAB, etc.) 
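The three-step allocation strategy spelled out in the comment above starts with the thread-local TLAB, where allocating is nothing more than a bounds check and a bump of the top pointer. A minimal sketch of that fast path, assuming a hypothetical Tlab struct in place of the real JavaThread tlab_top/tlab_end fields:

#include <cstddef>
#include <cstdio>

// Hypothetical per-thread TLAB.
struct Tlab {
  char* top;
  char* end;
};

// Returns the start of the new object, or nullptr to signal "take the
// shared-heap or slow path" (allocate_shared / slow_case in the hunk above).
static void* tlab_allocate(Tlab& tlab, std::size_t size_in_bytes) {
  char* obj = tlab.top;
  char* new_top = obj + size_in_bytes;
  if (new_top > tlab.end) {
    return nullptr;          // does not fit: fall back
  }
  tlab.top = new_top;        // bump the pointer; thread-local, so no atomics
  return obj;
}

int main() {
  static char buffer[1024];
  Tlab tlab{buffer, buffer + sizeof buffer};
  void* o1 = tlab_allocate(tlab, 64);
  void* o2 = tlab_allocate(tlab, 64);
  std::printf("%p %p\n", o1, o2);   // two adjacent 64-byte chunks
  return 0;
}

Because the TLAB belongs to one thread, no atomic instruction is needed; contention only appears on the shared-Eden path that follows.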
- - const bool allow_shared_alloc = - Universe::heap()->supports_inline_contig_alloc(); - - if (UseTLAB) { - __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); - __ lea(rbx, Address(rax, rdx, Address::times_1)); - __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); - __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); - __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); - if (ZeroTLAB) { - // the fields have been already cleared - __ jmp(initialize_header); - } else { - // initialize both the header and fields - __ jmp(initialize_object); - } - } - - // Allocation in the shared Eden, if allowed. - // - // rdx: instance size in bytes - if (allow_shared_alloc) { - __ bind(allocate_shared); - - ExternalAddress top((address)Universe::heap()->top_addr()); - ExternalAddress end((address)Universe::heap()->end_addr()); - - const Register RtopAddr = rscratch1; - const Register RendAddr = rscratch2; - - __ lea(RtopAddr, top); - __ lea(RendAddr, end); - __ movptr(rax, Address(RtopAddr, 0)); - - // For retries rax gets set by cmpxchgq - Label retry; - __ bind(retry); - __ lea(rbx, Address(rax, rdx, Address::times_1)); - __ cmpptr(rbx, Address(RendAddr, 0)); - __ jcc(Assembler::above, slow_case); - - // Compare rax with the top addr, and if still equal, store the new - // top addr in rbx at the address of the top addr pointer. Sets ZF if was - // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. - // - // rax: object begin - // rbx: object end - // rdx: instance size in bytes - if (os::is_MP()) { - __ lock(); - } - __ cmpxchgptr(rbx, Address(RtopAddr, 0)); - - // if someone beat us on the allocation, try again, otherwise continue - __ jcc(Assembler::notEqual, retry); - - __ incr_allocated_bytes(r15_thread, rdx, 0); - } - - if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { - // The object is initialized before the header. If the object size is - // zero, go directly to the header initialization. - __ bind(initialize_object); - __ decrementl(rdx, sizeof(oopDesc)); - __ jcc(Assembler::zero, initialize_header); - - // Initialize object fields - __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) - __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop - { - Label loop; - __ bind(loop); - __ movq(Address(rax, rdx, Address::times_8, - sizeof(oopDesc) - oopSize), - rcx); - __ decrementl(rdx); - __ jcc(Assembler::notZero, loop); - } - - // initialize object header only. 
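The allocate_shared block above is the contended counterpart: the new top is published with a locked cmpxchg and the code jumps back to retry whenever another thread moved the top first. A sketch of the same retry loop using std::atomic as a stand-in for the raw cmpxchgptr (SharedSpace and shared_allocate are invented names):

#include <atomic>
#include <cstddef>
#include <cstdio>

// Hypothetical shared allocation region with an atomically updated top pointer.
struct SharedSpace {
  std::atomic<char*> top;
  char* end;
};

static void* shared_allocate(SharedSpace& space, std::size_t size_in_bytes) {
  char* old_top = space.top.load(std::memory_order_relaxed);
  for (;;) {
    char* new_top = old_top + size_in_bytes;
    if (new_top > space.end) {
      return nullptr;                        // out of space: slow case
    }
    // If another thread won the race, old_top is refreshed and we retry,
    // just like the jcc(notEqual, retry) after the cmpxchg above.
    if (space.top.compare_exchange_weak(old_top, new_top,
                                        std::memory_order_relaxed)) {
      return old_top;                        // we own [old_top, new_top)
    }
  }
}

int main() {
  static char heap[1024];
  SharedSpace space;
  space.top.store(heap);
  space.end = heap + sizeof heap;
  void* obj = shared_allocate(space, 128);
  std::printf("%p\n", obj);
  return 0;
}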
- __ bind(initialize_header); - if (UseBiasedLocking) { - __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset())); - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); - } else { - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), - (intptr_t) markOopDesc::prototype()); // header (address 0x1) - } - __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) - __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops - __ store_klass(rax, rsi); // store klass last - - { - SkipIfEqual skip(_masm, &DTraceAllocProbes, false); - // Trigger dtrace event for fastpath - __ push(atos); // save the return value - __ call_VM_leaf( - CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); - __ pop(atos); // restore the return value - - } - __ jmp(done); - } - - - // slow case - __ bind(slow_case); - __ get_constant_pool(c_rarg1); - __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2); - __ verify_oop(rax); - - // continue - __ bind(done); -} - -void TemplateTable::newarray() { - transition(itos, atos); - __ load_unsigned_byte(c_rarg1, at_bcp(1)); - __ movl(c_rarg2, rax); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), - c_rarg1, c_rarg2); -} - -void TemplateTable::anewarray() { - transition(itos, atos); - __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); - __ get_constant_pool(c_rarg1); - __ movl(c_rarg3, rax); - call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), - c_rarg1, c_rarg2, c_rarg3); -} - -void TemplateTable::arraylength() { - transition(atos, itos); - __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); - __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); -} - -void TemplateTable::checkcast() { - transition(atos, atos); - Label done, is_null, ok_is_subtype, quicked, resolved; - __ testptr(rax, rax); // object is in rax - __ jcc(Assembler::zero, is_null); - - // Get cpool & tags index - __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index - // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, - Address::times_1, - Array::base_offset_in_bytes()), - JVM_CONSTANT_Class); - __ jcc(Assembler::equal, quicked); - __ push(atos); // save receiver for result, and for GC - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); - // vm_result_2 has metadata result - __ get_vm_result_2(rax, r15_thread); - __ pop_ptr(rdx); // restore receiver - __ jmpb(resolved); - - // Get superklass in rax and subklass in rbx - __ bind(quicked); - __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check - __ movptr(rax, Address(rcx, rbx, - Address::times_8, sizeof(ConstantPool))); - - __ bind(resolved); - __ load_klass(rbx, rdx); - - // Generate subtype check. Blows rcx, rdi. Object in rdx. - // Superklass in rax. Subklass in rbx. - __ gen_subtype_check(rbx, ok_is_subtype); - - // Come here on failure - __ push_ptr(rdx); - // object is at TOS - __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); - - // Come here on success - __ bind(ok_is_subtype); - __ mov(rax, rdx); // Restore object in rdx - - // Collect counts on whether this check-cast sees NULLs a lot or not. 
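checkcast above quickens the constant pool entry, then runs gen_subtype_check and either keeps the reference or jumps to the ClassCastException entry; null references always pass. A compact sketch of that decision, with an invented Klass/Oop model where the subtype test is just a walk up the super chain (HotSpot additionally keeps caches and secondary-supers tables, omitted here):

#include <cstdio>

struct Klass {
  const char* name;
  const Klass* super;
};

static bool is_subtype_of(const Klass* sub, const Klass* super_klass) {
  for (const Klass* k = sub; k != nullptr; k = k->super) {
    if (k == super_klass) return true;
  }
  return false;
}

struct Oop { const Klass* klass; };

// Mirrors checkcast: null passes through, a failing subtype check takes the
// exception path (here just reported), success keeps the original reference.
static const Oop* checkcast(const Oop* obj, const Klass* target) {
  if (obj == nullptr) return obj;          // null always passes
  if (!is_subtype_of(obj->klass, target)) {
    std::puts("ClassCastException");       // throw path
    return nullptr;
  }
  return obj;                              // ok_is_subtype
}

int main() {
  Klass object_k{"Object", nullptr};
  Klass string_k{"String", &object_k};
  Klass thread_k{"Thread", &object_k};
  Oop s{&string_k};
  checkcast(&s, &object_k);                // succeeds silently
  checkcast(&s, &thread_k);                // prints ClassCastException
  return 0;
}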
- if (ProfileInterpreter) { - __ jmp(done); - __ bind(is_null); - __ profile_null_seen(rcx); - } else { - __ bind(is_null); // same as 'done' - } - __ bind(done); -} - -void TemplateTable::instanceof() { - transition(atos, itos); - Label done, is_null, ok_is_subtype, quicked, resolved; - __ testptr(rax, rax); - __ jcc(Assembler::zero, is_null); - - // Get cpool & tags index - __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array - __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index - // See if bytecode has already been quicked - __ cmpb(Address(rdx, rbx, - Address::times_1, - Array::base_offset_in_bytes()), - JVM_CONSTANT_Class); - __ jcc(Assembler::equal, quicked); - - __ push(atos); // save receiver for result, and for GC - call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); - // vm_result_2 has metadata result - __ get_vm_result_2(rax, r15_thread); - __ pop_ptr(rdx); // restore receiver - __ verify_oop(rdx); - __ load_klass(rdx, rdx); - __ jmpb(resolved); - - // Get superklass in rax and subklass in rdx - __ bind(quicked); - __ load_klass(rdx, rax); - __ movptr(rax, Address(rcx, rbx, - Address::times_8, sizeof(ConstantPool))); - - __ bind(resolved); - - // Generate subtype check. Blows rcx, rdi - // Superklass in rax. Subklass in rdx. - __ gen_subtype_check(rdx, ok_is_subtype); - - // Come here on failure - __ xorl(rax, rax); - __ jmpb(done); - // Come here on success - __ bind(ok_is_subtype); - __ movl(rax, 1); - - // Collect counts on whether this test sees NULLs a lot or not. - if (ProfileInterpreter) { - __ jmp(done); - __ bind(is_null); - __ profile_null_seen(rcx); - } else { - __ bind(is_null); // same as 'done' - } - __ bind(done); - // rax = 0: obj == NULL or obj is not an instanceof the specified klass - // rax = 1: obj != NULL and obj is an instanceof the specified klass -} - -//----------------------------------------------------------------------------- -// Breakpoints -void TemplateTable::_breakpoint() { - // Note: We get here even if we are single stepping.. - // jbug inists on setting breakpoints at every bytecode - // even if we are in single step mode. - - transition(vtos, vtos); - - // get the unpatched byte code - __ get_method(c_rarg1); - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::get_original_bytecode_at), - c_rarg1, r13); - __ mov(rbx, rax); - - // post the breakpoint event - __ get_method(c_rarg1); - __ call_VM(noreg, - CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), - c_rarg1, r13); - - // complete the execution of original bytecode - __ dispatch_only_normal(vtos); -} - -//----------------------------------------------------------------------------- -// Exceptions - -void TemplateTable::athrow() { - transition(atos, vtos); - __ null_check(rax); - __ jump(ExternalAddress(Interpreter::throw_exception_entry())); -} - -//----------------------------------------------------------------------------- -// Synchronization -// -// Note: monitorenter & exit are symmetric routines; which is reflected -// in the assembly code structure as well -// -// Stack layout: -// -// [expressions ] <--- rsp = expression stack top -// .. -// [expressions ] -// [monitor entry] <--- monitor block top = expression stack bot -// .. -// [monitor entry] -// [frame data ] <--- monitor block bot -// ... 
-// [saved rbp ] <--- rbp -void TemplateTable::monitorenter() { - transition(atos, vtos); - - // check for NULL object - __ null_check(rax); - - const Address monitor_block_top( - rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - const Address monitor_block_bot( - rbp, frame::interpreter_frame_initial_sp_offset * wordSize); - const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; - - Label allocated; - - // initialize entry pointer - __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL - - // find a free slot in the monitor block (result in c_rarg1) - { - Label entry, loop, exit; - __ movptr(c_rarg3, monitor_block_top); // points to current entry, - // starting with top-most entry - __ lea(c_rarg2, monitor_block_bot); // points to word before bottom - // of monitor block - __ jmpb(entry); - - __ bind(loop); - // check if current entry is used - __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); - // if not used then remember entry in c_rarg1 - __ cmov(Assembler::equal, c_rarg1, c_rarg3); - // check if current entry is for same object - __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); - // if same object then stop searching - __ jccb(Assembler::equal, exit); - // otherwise advance to next entry - __ addptr(c_rarg3, entry_size); - __ bind(entry); - // check if bottom reached - __ cmpptr(c_rarg3, c_rarg2); - // if not at bottom then check this entry - __ jcc(Assembler::notEqual, loop); - __ bind(exit); - } - - __ testptr(c_rarg1, c_rarg1); // check if a slot has been found - __ jcc(Assembler::notZero, allocated); // if found, continue with that one - - // allocate one if there's no free slot - { - Label entry, loop; - // 1. compute new pointers // rsp: old expression stack top - __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom - __ subptr(rsp, entry_size); // move expression stack top - __ subptr(c_rarg1, entry_size); // move expression stack bottom - __ mov(c_rarg3, rsp); // set start value for copy loop - __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom - __ jmp(entry); - // 2. move expression stack contents - __ bind(loop); - __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack - // word from old location - __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location - __ addptr(c_rarg3, wordSize); // advance to next word - __ bind(entry); - __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached - __ jcc(Assembler::notEqual, loop); // if not at bottom then - // copy next word - } - - // call run-time routine - // c_rarg1: points to monitor entry - __ bind(allocated); - - // Increment bcp to point to the next bytecode, so exception - // handling for async. exceptions work correctly. - // The object has already been poped from the stack, so the - // expression stack looks correct. - __ increment(r13); - - // store object - __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); - __ lock_object(c_rarg1); - - // check to make sure this monitor doesn't cause stack overflow after locking - __ save_bcp(); // in case of exception - __ generate_stack_overflow_check(0); - - // The bcp has already been incremented. Just need to dispatch to - // next instruction. 
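monitorenter above first scans the frame's monitor block for a usable slot, remembering unused entries and stopping early when the object is already locked in this frame, and only grows the block (by shifting the expression stack down one entry) when nothing is found. A sketch of that search, using an invented BasicObjectLock stand-in:

#include <cstdio>

// Hypothetical monitor block entry: a null object marks a free slot.
struct BasicObjectLock {
  const void* obj;
};

// Returns a reusable free slot, or nullptr when the caller must grow the
// monitor block. Like the cmov in the hunk above, the last unused entry seen
// wins; finding the same object stops the scan early.
static BasicObjectLock* find_free_monitor_slot(BasicObjectLock* top,
                                               BasicObjectLock* bottom,
                                               const void* obj) {
  BasicObjectLock* free_slot = nullptr;
  for (BasicObjectLock* e = top; e != bottom; ++e) {
    if (e->obj == nullptr) {
      free_slot = e;           // remember an unused entry
    }
    if (e->obj == obj) {
      break;                   // object already locked in this frame: stop
    }
  }
  return free_slot;            // nullptr means: allocate a new entry
}

int main() {
  int other = 0, target = 0;
  BasicObjectLock block[3] = { {&other}, {nullptr}, {nullptr} };
  BasicObjectLock* slot = find_free_monitor_slot(block, block + 3, &target);
  std::printf("slot index: %td\n", slot - block);   // prints 2 (last free slot)
  return 0;
}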
- __ dispatch_next(vtos); -} - - -void TemplateTable::monitorexit() { - transition(atos, vtos); - - // check for NULL object - __ null_check(rax); - - const Address monitor_block_top( - rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); - const Address monitor_block_bot( - rbp, frame::interpreter_frame_initial_sp_offset * wordSize); - const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; - - Label found; - - // find matching slot - { - Label entry, loop; - __ movptr(c_rarg1, monitor_block_top); // points to current entry, - // starting with top-most entry - __ lea(c_rarg2, monitor_block_bot); // points to word before bottom - // of monitor block - __ jmpb(entry); - - __ bind(loop); - // check if current entry is for same object - __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); - // if same object then stop searching - __ jcc(Assembler::equal, found); - // otherwise advance to next entry - __ addptr(c_rarg1, entry_size); - __ bind(entry); - // check if bottom reached - __ cmpptr(c_rarg1, c_rarg2); - // if not at bottom then check this entry - __ jcc(Assembler::notEqual, loop); - } - - // error handling. Unlocking was not block-structured - __ call_VM(noreg, CAST_FROM_FN_PTR(address, - InterpreterRuntime::throw_illegal_monitor_state_exception)); - __ should_not_reach_here(); - - // call run-time routine - // rsi: points to monitor entry - __ bind(found); - __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) - __ unlock_object(c_rarg1); - __ pop_ptr(rax); // discard object -} - - -// Wide instructions -void TemplateTable::wide() { - transition(vtos, vtos); - __ load_unsigned_byte(rbx, at_bcp(1)); - __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point)); - __ jmp(Address(rscratch1, rbx, Address::times_8)); - // Note: the r13 increment step is part of the individual wide - // bytecode implementations -} - - -// Multi arrays -void TemplateTable::multianewarray() { - transition(vtos, atos); - __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions - // last dim is on top of stack; we want address of first one: - // first_addr = last_addr + (ndims - 1) * wordSize - __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); - call_VM(rax, - CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), - c_rarg1); - __ load_unsigned_byte(rbx, at_bcp(3)); - __ lea(rsp, Address(rsp, rbx, Address::times_8)); -} -#endif // !CC_INTERP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os/linux/vm/os_linux.cpp --- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -227,31 +227,7 @@ #endif // Cpu architecture string -#if defined(ZERO) -static char cpu_arch[] = ZERO_LIBARCH; -#elif defined(IA64) -static char cpu_arch[] = "ia64"; -#elif defined(IA32) -static char cpu_arch[] = "i386"; -#elif defined(AMD64) -static char cpu_arch[] = "amd64"; -#elif defined(ARM) -static char cpu_arch[] = "arm"; -#elif defined(PPC32) -static char cpu_arch[] = "ppc"; -#elif defined(PPC64) -static char cpu_arch[] = "ppc64"; -#elif defined(SPARC) - #ifdef _LP64 -static char cpu_arch[] = "sparcv9"; - #else -static char cpu_arch[] = "sparc"; - #endif -#elif defined(AARCH64) -static char cpu_arch[] = "aarch64"; -#else - #error Add appropriate cpu_arch setting -#endif +static char cpu_arch[] = HOTSPOT_LIB_ARCH; // pid_t gettid() @@ -3296,7 +3272,7 @@ #ifndef ZERO large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) 
IA64_ONLY(256 * M) SPARC_ONLY(4 * M) - ARM_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M); + ARM32_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M); #endif // ZERO FILE *fp = fopen("/proc/meminfo", "r"); diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os/solaris/vm/jvm_solaris.h --- a/hotspot/src/os/solaris/vm/jvm_solaris.h Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os/solaris/vm/jvm_solaris.h Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,9 @@ * JNI conversion, which should be sorted out later. */ +#define __USE_LEGACY_PROTOTYPES__ #include /* For DIR */ +#undef __USE_LEGACY_PROTOTYPES__ #include /* For MAXPATHLEN */ #include /* For socklen_t */ #include /* For F_OK, R_OK, W_OK */ diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/aix_ppc/vm/orderAccess_aix_ppc.inline.hpp --- a/hotspot/src/os_cpu/aix_ppc/vm/orderAccess_aix_ppc.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/aix_ppc/vm/orderAccess_aix_ppc.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2014, SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,6 +28,9 @@ #include "runtime/orderAccess.hpp" +// Compiler version last used for testing: xlc 12 +// Please update this information when this file changes + // Implementation of class OrderAccess. // @@ -61,86 +64,30 @@ #define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); #define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); #define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); -#define inlasm_release() inlasm_lwsync(); -#define inlasm_acquire() inlasm_lwsync(); // Use twi-isync for load_acquire (faster than lwsync). 
// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"): // #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); #define inlasm_acquire_reg(X) inlasm_lwsync(); -#define inlasm_fence() inlasm_sync(); -inline void OrderAccess::loadload() { inlasm_lwsync(); } -inline void OrderAccess::storestore() { inlasm_lwsync(); } -inline void OrderAccess::loadstore() { inlasm_lwsync(); } -inline void OrderAccess::storeload() { inlasm_fence(); } - -inline void OrderAccess::acquire() { inlasm_acquire(); } -inline void OrderAccess::release() { inlasm_release(); } -inline void OrderAccess::fence() { inlasm_fence(); } - -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { register jbyte t = *p; inlasm_acquire_reg(t); return t; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { register jshort t = *p; inlasm_acquire_reg(t); return t; } -inline jint OrderAccess::load_acquire(volatile jint* p) { register jint t = *p; inlasm_acquire_reg(t); return t; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { register jlong t = *p; inlasm_acquire_reg(t); return t; } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { register jubyte t = *p; inlasm_acquire_reg(t); return t; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; } -inline juint OrderAccess::load_acquire(volatile juint* p) { register juint t = *p; inlasm_acquire_reg(t); return t; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong)load_acquire((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { register jfloat t = *p; inlasm_acquire(); return t; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)load_acquire((volatile jlong*)p); } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*) load_acquire((volatile jlong*)p); } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*) load_acquire((volatile jlong*)p); } +inline void OrderAccess::loadload() { inlasm_lwsync(); } +inline void OrderAccess::storestore() { inlasm_lwsync(); } +inline void OrderAccess::loadstore() { inlasm_lwsync(); } +inline void OrderAccess::storeload() { inlasm_sync(); } -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; } - -inline void 
OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; } +inline void OrderAccess::acquire() { inlasm_lwsync(); } +inline void OrderAccess::release() { inlasm_lwsync(); } +inline void OrderAccess::fence() { inlasm_sync(); } -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); } - -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; inlasm_fence(); } - -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); } +template<> inline jbyte OrderAccess::specialized_load_acquire (volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; } +template<> inline jshort OrderAccess::specialized_load_acquire(volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; } +template<> inline jint OrderAccess::specialized_load_acquire (volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; } +template<> inline jlong OrderAccess::specialized_load_acquire (volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; } #undef inlasm_sync #undef inlasm_lwsync 
#undef inlasm_eieio #undef inlasm_isync -#undef inlasm_release -#undef inlasm_acquire -#undef inlasm_fence + +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp --- a/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,27 +29,27 @@ #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" +// Compiler version last used for testing: clang 5.1 +// Please update this information when this file changes + +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +static inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); +} + +// x86 is TSO and hence only needs a fence for storeload +// However, a compiler barrier is still needed to prevent reordering +// between volatile and non-volatile memory accesses. + // Implementation of class OrderAccess. -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } -inline void OrderAccess::acquire() { - volatile intptr_t local_dummy; -#ifdef AMD64 - __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory"); -#else - __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory"); -#endif // AMD64 -} - -inline void OrderAccess::release() { - // Avoid hitting the same cache-line from - // different threads. 
- volatile jint local_dummy = 0; -} +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } inline void OrderAccess::fence() { if (os::is_MP()) { @@ -60,156 +60,50 @@ __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); #endif } + compiler_barrier(); } -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } -inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } -inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jbyte* p, jbyte v) { __asm__ volatile ( "xchgb (%2),%0" : "=q" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jshort* p, jshort v) { __asm__ volatile ( "xchgw (%2),%0" : "=r" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::store_fence(jint* p, jint v) { +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jint* p, jint v) { __asm__ volatile ( "xchgl (%2),%0" : "=r" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { #ifdef AMD64 - __asm__ __volatile__ ("xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - *p = v; fence(); -#endif // AMD64 -} - -// AMD64 copied the bodies for the the signed version. 
32bit did this. As long as the -// compiler does the inlining this is simpler. -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { store_fence((jbyte*)p, (jbyte)v); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); } -inline void OrderAccess::store_fence(juint* p, juint v) { store_fence((jint*)p, (jint)v); } -inline void OrderAccess::store_fence(julong* p, julong v) { store_fence((jlong*)p, (jlong)v); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } - -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { -#ifdef AMD64 - __asm__ __volatile__ ("xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - store_fence((jint*)p, (jint)v); -#endif // AMD64 -} - -inline void OrderAccess::store_ptr_fence(void** p, void* v) { -#ifdef AMD64 - __asm__ __volatile__ ("xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - store_fence((jint*)p, (jint)v); -#endif // AMD64 -} - -// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile. -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); -} -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { - __asm__ volatile ( "xchgw (%2),%0" +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jlong* p, jlong v) { + __asm__ volatile ( "xchgq (%2), %0" : "=r" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -} +#endif // AMD64 -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { -#ifdef AMD64 - __asm__ __volatile__ ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - release_store(p, v); fence(); -#endif // AMD64 +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jfloat* p, jfloat v) { + release_store_fence((volatile jint*)p, jint_cast(v)); +} +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jdouble* p, jdouble v) { + release_store_fence((volatile jlong*)p, jlong_cast(v)); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store_fence((volatile jint*)p, (jint)v); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store_fence((volatile jlong*)p, (jlong)v); } - -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { -#ifdef AMD64 - __asm__ __volatile__ ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - release_store_fence((volatile jint*)p, (jint)v); -#endif // AMD64 -} -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { -#ifdef AMD64 - __asm__ 
__volatile__ ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - release_store_fence((volatile jint*)p, (jint)v); -#endif // AMD64 -} +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/bsd_zero/vm/orderAccess_bsd_zero.inline.hpp --- a/hotspot/src/os_cpu/bsd_zero/vm/orderAccess_bsd_zero.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/bsd_zero/vm/orderAccess_bsd_zero.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -40,8 +40,7 @@ #define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) #define FULL_MEM_BARRIER __kernel_dmb() -#define READ_MEM_BARRIER __kernel_dmb() -#define WRITE_MEM_BARRIER __kernel_dmb() +#define LIGHT_MEM_BARRIER __kernel_dmb() #else // ARM @@ -50,126 +49,31 @@ #ifdef PPC #ifdef __NO_LWSYNC__ -#define READ_MEM_BARRIER __asm __volatile ("sync":::"memory") -#define WRITE_MEM_BARRIER __asm __volatile ("sync":::"memory") +#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory") #else -#define READ_MEM_BARRIER __asm __volatile ("lwsync":::"memory") -#define WRITE_MEM_BARRIER __asm __volatile ("lwsync":::"memory") +#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory") #endif #else // PPC -#define READ_MEM_BARRIER __asm __volatile ("":::"memory") -#define WRITE_MEM_BARRIER __asm __volatile ("":::"memory") +#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory") #endif // PPC #endif // ARM - -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { - READ_MEM_BARRIER; -} - -inline void OrderAccess::release() { - WRITE_MEM_BARRIER; -} - -inline void OrderAccess::fence() { - FULL_MEM_BARRIER; -} - -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte data = *p; acquire(); return data; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort data = *p; acquire(); return data; } -inline jint OrderAccess::load_acquire(volatile jint* p) { jint data = *p; acquire(); return data; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { - jlong tmp; - os::atomic_copy64(p, &tmp); - acquire(); - return tmp; -} -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte data = *p; acquire(); return data; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort data = *p; acquire(); return data; } -inline juint OrderAccess::load_acquire(volatile juint* p) { juint data = *p; acquire(); return data; } -inline julong OrderAccess::load_acquire(volatile julong* p) { - julong tmp; - os::atomic_copy64(p, &tmp); - acquire(); - return tmp; -} -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat data = *p; acquire(); return data; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { - jdouble tmp; - os::atomic_copy64(p, &tmp); - acquire(); - return tmp; -} +// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient +// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore. 
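The note just added above defines LIGHT_MEM_BARRIER as anything strong enough for the StoreStore | LoadLoad | LoadStore orderings, leaving StoreLoad as the only case that needs the full barrier. As a rough analogy only (not the VM's implementation), the same split can be expressed with C++11 fences, where acquire/release fences cover the three light orderings and only storeload needs a sequentially consistent fence:

#include <atomic>

namespace order_access_sketch {
  // Light orderings: acquire covers LoadLoad and LoadStore,
  // release covers StoreStore (and LoadStore).
  inline void loadload()   { std::atomic_thread_fence(std::memory_order_acquire); }
  inline void loadstore()  { std::atomic_thread_fence(std::memory_order_acquire); }
  inline void storestore() { std::atomic_thread_fence(std::memory_order_release); }
  // StoreLoad is the one ordering that needs the full fence.
  inline void storeload()  { std::atomic_thread_fence(std::memory_order_seq_cst); }

  inline void acquire()    { std::atomic_thread_fence(std::memory_order_acquire); }
  inline void release()    { std::atomic_thread_fence(std::memory_order_release); }
  inline void fence()      { std::atomic_thread_fence(std::memory_order_seq_cst); }
}

int main() {
  order_access_sketch::storestore();  // e.g. publish fields before the reference
  order_access_sketch::storeload();   // the only barrier that is not "light"
  return 0;
}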
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { - intptr_t data = *p; - acquire(); - return data; -} -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { - void *data = *(void* volatile *)p; - acquire(); - return data; -} -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { - void *data = *(void* const volatile *)p; - acquire(); - return data; -} - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) -{ release(); os::atomic_copy64(&v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) -{ release(); os::atomic_copy64(&v, p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) -{ release(); os::atomic_copy64(&v, p); } +inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storeload() { FULL_MEM_BARRIER; } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release(); *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) -{ release(); *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { os::atomic_copy64(&v, p); fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { os::atomic_copy64(&v, p); fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { os::atomic_copy64(&v, p); fence(); } +inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::fence() { FULL_MEM_BARRIER; } -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } - -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { 
release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_ptr(p, v); fence(); } +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/linux_ppc/vm/orderAccess_linux_ppc.inline.hpp --- a/hotspot/src/os_cpu/linux_ppc/vm/orderAccess_linux_ppc.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/linux_ppc/vm/orderAccess_linux_ppc.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2014, SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,6 +32,9 @@ #error "OrderAccess currently only implemented for PPC64" #endif +// Compiler version last used for testing: gcc 4.1.2 +// Please update this information when this file changes + // Implementation of class OrderAccess. // @@ -65,84 +68,29 @@ #define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); #define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); #define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); -#define inlasm_release() inlasm_lwsync(); -#define inlasm_acquire() inlasm_lwsync(); // Use twi-isync for load_acquire (faster than lwsync). 
#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); -#define inlasm_fence() inlasm_sync(); -inline void OrderAccess::loadload() { inlasm_lwsync(); } -inline void OrderAccess::storestore() { inlasm_lwsync(); } -inline void OrderAccess::loadstore() { inlasm_lwsync(); } -inline void OrderAccess::storeload() { inlasm_fence(); } - -inline void OrderAccess::acquire() { inlasm_acquire(); } -inline void OrderAccess::release() { inlasm_release(); } -inline void OrderAccess::fence() { inlasm_fence(); } - -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { register jbyte t = *p; inlasm_acquire_reg(t); return t; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { register jshort t = *p; inlasm_acquire_reg(t); return t; } -inline jint OrderAccess::load_acquire(volatile jint* p) { register jint t = *p; inlasm_acquire_reg(t); return t; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { register jlong t = *p; inlasm_acquire_reg(t); return t; } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { register jubyte t = *p; inlasm_acquire_reg(t); return t; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; } -inline juint OrderAccess::load_acquire(volatile juint* p) { register juint t = *p; inlasm_acquire_reg(t); return t; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong)load_acquire((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { register jfloat t = *p; inlasm_acquire(); return t; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)load_acquire((volatile jlong*)p); } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*) load_acquire((volatile jlong*)p); } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*) load_acquire((volatile jlong*)p); } +inline void OrderAccess::loadload() { inlasm_lwsync(); } +inline void OrderAccess::storestore() { inlasm_lwsync(); } +inline void OrderAccess::loadstore() { inlasm_lwsync(); } +inline void OrderAccess::storeload() { inlasm_sync(); } -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; } - -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = 
v; } +inline void OrderAccess::acquire() { inlasm_lwsync(); } +inline void OrderAccess::release() { inlasm_lwsync(); } +inline void OrderAccess::fence() { inlasm_sync(); } -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); } - -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; inlasm_fence(); } - -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); } +template<> inline jbyte OrderAccess::specialized_load_acquire (volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; } +template<> inline jshort OrderAccess::specialized_load_acquire(volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; } +template<> inline jint OrderAccess::specialized_load_acquire (volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; } +template<> inline jlong OrderAccess::specialized_load_acquire (volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; } #undef inlasm_sync #undef inlasm_lwsync #undef inlasm_eieio #undef inlasm_isync -#undef inlasm_release -#undef inlasm_acquire -#undef inlasm_fence +#undef inlasm_acquire_reg + +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // 
OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/linux_sparc/vm/orderAccess_linux_sparc.inline.hpp --- a/hotspot/src/os_cpu/linux_sparc/vm/orderAccess_linux_sparc.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/linux_sparc/vm/orderAccess_linux_sparc.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,81 +29,25 @@ // Implementation of class OrderAccess. -// Assume TSO. - -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { - __asm__ volatile ("nop" : : :); -} - -inline void OrderAccess::release() { - jint* local_dummy = (jint*)&local_dummy; - __asm__ volatile("stw %%g0, [%0]" : : "r" (local_dummy) : "memory"); -} - -inline void OrderAccess::fence() { - __asm__ volatile ("membar #StoreLoad" : : :); +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +static inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); } -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } -inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { return *p; } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } -inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return *p; } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } +// Assume TSO. 
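// Illustrative sketch, not part of this patch: under TSO (SPARC, x86) the
// hardware already preserves LoadLoad, StoreStore and LoadStore ordering, so
// acquire()/release() only need to stop the C++ compiler from reordering; the
// empty asm with a "memory" clobber introduced above does exactly that, and
// only the StoreLoad case needs a real membar. A minimal standalone model of
// that split -- all names below are hypothetical:
static inline void my_compiler_only_barrier() {
  __asm__ volatile ("" : : : "memory");   // emits no instruction; compiler may not reorder or cache memory across it
}

static volatile int g_payload = 0;        // hypothetical published data
static volatile int g_ready   = 0;        // hypothetical publication flag

static inline void my_publish(int v) {
  g_payload = v;
  my_compiler_only_barrier();             // release: compiler barrier suffices, TSO keeps the store-store order
  g_ready = 1;
}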
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { *p = v; } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { *p = v; } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } +inline void OrderAccess::fence() { + __asm__ volatile ("membar #StoreLoad" : : : "memory"); +} -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat 
v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { *(void* volatile *)p = v; fence(); } +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp --- a/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,9 @@ #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" +// Compiler version last used for testing: gcc 4.8.2 +// Please update this information when this file changes + // Implementation of class OrderAccess. // A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions @@ -36,23 +39,13 @@ __asm__ volatile ("" : : : "memory"); } -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } -inline void OrderAccess::acquire() { - volatile intptr_t local_dummy; -#ifdef AMD64 - __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory"); -#else - __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory"); -#endif // AMD64 -} - -inline void OrderAccess::release() { - compiler_barrier(); -} +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } inline void OrderAccess::fence() { if (os::is_MP()) { @@ -63,156 +56,50 @@ __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); #endif } + compiler_barrier(); } -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte v = *p; compiler_barrier(); return v; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort v = *p; compiler_barrier(); return v; } -inline jint OrderAccess::load_acquire(volatile jint* p) { jint v = *p; compiler_barrier(); return v; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { jlong v = Atomic::load(p); compiler_barrier(); return v; } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte v = *p; compiler_barrier(); return v; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; } -inline juint OrderAccess::load_acquire(volatile juint* p) { juint v = *p; compiler_barrier(); return v; } -inline julong OrderAccess::load_acquire(volatile julong* p) { julong v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat v = *p; compiler_barrier(); return v; } 
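// Illustrative sketch, not part of this patch: the per-type load_acquire
// bodies removed in this hunk are subsumed by the generalized OrderAccess
// scheme (VM_HAS_GENERALIZED_ORDER_ACCESS) -- shared code supplies one default
// of roughly the shape below, and a platform only specializes where it can do
// better than "plain access plus barrier". Simplified standalone analogue,
// hypothetical names, assuming an x86/TSO-style acquire:
template <typename T>
inline T my_default_load_acquire(const volatile T* p) {
  T v = *p;                               // plain volatile load
  __asm__ volatile ("" : : : "memory");   // acquire(): a compiler barrier is enough under TSO
  return v;
}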
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { intptr_t v = *p; compiler_barrier(); return v; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { void* v = *(void* volatile *)p; compiler_barrier(); return v; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { void* v = *(void* const volatile *)p; compiler_barrier(); return v; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { compiler_barrier(); Atomic::store(v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { compiler_barrier(); *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jbyte* p, jbyte v) { __asm__ volatile ( "xchgb (%2),%0" : "=q" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jshort* p, jshort v) { __asm__ volatile ( "xchgw (%2),%0" : "=r" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::store_fence(jint* p, jint v) { +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jint* p, jint v) { __asm__ volatile ( "xchgl (%2),%0" : "=r" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { #ifdef AMD64 - __asm__ __volatile__ ("xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - *p = v; fence(); -#endif // AMD64 -} - -// AMD64 copied the bodies for the the signed version. 32bit did this. As long as the -// compiler does the inlining this is simpler. 
-inline void OrderAccess::store_fence(jubyte* p, jubyte v) { store_fence((jbyte*)p, (jbyte)v); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); } -inline void OrderAccess::store_fence(juint* p, juint v) { store_fence((jint*)p, (jint)v); } -inline void OrderAccess::store_fence(julong* p, julong v) { store_fence((jlong*)p, (jlong)v); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { -#ifdef AMD64 - __asm__ __volatile__ ("xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - store_fence((jint*)p, (jint)v); -#endif // AMD64 -} - -inline void OrderAccess::store_ptr_fence(void** p, void* v) { -#ifdef AMD64 - __asm__ __volatile__ ("xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - store_fence((jint*)p, (jint)v); -#endif // AMD64 -} - -// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile. -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); -} -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { - __asm__ volatile ( "xchgw (%2),%0" +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jlong* p, jlong v) { + __asm__ volatile ( "xchgq (%2), %0" : "=r" (v) : "0" (v), "r" (p) : "memory"); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -} +#endif // AMD64 -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { -#ifdef AMD64 - __asm__ __volatile__ ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - release_store(p, v); fence(); -#endif // AMD64 +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jfloat* p, jfloat v) { + release_store_fence((volatile jint*)p, jint_cast(v)); +} +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jdouble* p, jdouble v) { + release_store_fence((volatile jlong*)p, jlong_cast(v)); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store_fence((volatile jint*)p, (jint)v); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store_fence((volatile jlong*)p, (jlong)v); } - -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { -#ifdef AMD64 - __asm__ __volatile__ ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); -#else - release_store_fence((volatile jint*)p, (jint)v); -#endif // AMD64 -} -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { -#ifdef AMD64 - __asm__ __volatile__ ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) 
- : "memory"); -#else - release_store_fence((volatile jint*)p, (jint)v); -#endif // AMD64 -} +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/linux_zero/vm/orderAccess_linux_zero.inline.hpp --- a/hotspot/src/os_cpu/linux_zero/vm/orderAccess_linux_zero.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/linux_zero/vm/orderAccess_linux_zero.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -40,8 +40,7 @@ #define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) #define FULL_MEM_BARRIER __kernel_dmb() -#define READ_MEM_BARRIER __kernel_dmb() -#define WRITE_MEM_BARRIER __kernel_dmb() +#define LIGHT_MEM_BARRIER __kernel_dmb() #else // ARM @@ -49,126 +48,33 @@ #ifdef PPC -#define READ_MEM_BARRIER __asm __volatile ("isync":::"memory") #ifdef __NO_LWSYNC__ -#define WRITE_MEM_BARRIER __asm __volatile ("sync":::"memory") +#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory") #else -#define WRITE_MEM_BARRIER __asm __volatile ("lwsync":::"memory") +#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory") #endif #else // PPC -#define READ_MEM_BARRIER __asm __volatile ("":::"memory") -#define WRITE_MEM_BARRIER __asm __volatile ("":::"memory") +#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory") #endif // PPC #endif // ARM - -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { - READ_MEM_BARRIER; -} - -inline void OrderAccess::release() { - WRITE_MEM_BARRIER; -} - -inline void OrderAccess::fence() { - FULL_MEM_BARRIER; -} - -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte data = *p; acquire(); return data; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort data = *p; acquire(); return data; } -inline jint OrderAccess::load_acquire(volatile jint* p) { jint data = *p; acquire(); return data; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { - jlong tmp; - os::atomic_copy64(p, &tmp); - acquire(); - return tmp; -} -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte data = *p; acquire(); return data; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort data = *p; acquire(); return data; } -inline juint OrderAccess::load_acquire(volatile juint* p) { juint data = *p; acquire(); return data; } -inline julong OrderAccess::load_acquire(volatile julong* p) { - julong tmp; - os::atomic_copy64(p, &tmp); - acquire(); - return tmp; -} -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat data = *p; acquire(); return data; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { - jdouble tmp; - os::atomic_copy64(p, &tmp); - acquire(); - return tmp; -} +// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient +// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore. 
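// Illustrative sketch, not part of this patch: the one ordering a TSO-style
// LIGHT_MEM_BARRIER does not give is StoreLoad, which is why storeload() and
// fence() below still map to FULL_MEM_BARRIER. The classic case that needs it
// (hypothetical names): two threads each store their own flag and then load
// the other's; without a full barrier both loads may observe the old zero.
static volatile int my_flag_a = 0, my_flag_b = 0;
static bool my_thread_a_try_enter() {
  my_flag_a = 1;
  // a StoreLoad fence is needed here (e.g. OrderAccess::storeload()); a light
  // barrier cannot keep the load below from passing the store above in hardware
  return my_flag_b == 0;
}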
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { - intptr_t data = *p; - acquire(); - return data; -} -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { - void *data = *(void* volatile *)p; - acquire(); - return data; -} -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { - void *data = *(void* const volatile *)p; - acquire(); - return data; -} - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) -{ release(); os::atomic_copy64(&v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) -{ release(); os::atomic_copy64(&v, p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { release(); *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) -{ release(); os::atomic_copy64(&v, p); } +inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storeload() { FULL_MEM_BARRIER; } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release(); *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) -{ release(); *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { os::atomic_copy64(&v, p); fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { os::atomic_copy64(&v, p); fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { os::atomic_copy64(&v, p); fence(); } +inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } +inline void OrderAccess::fence() { FULL_MEM_BARRIER; } -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { 
release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_ptr(p, v); fence(); } +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp --- a/hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/solaris_sparc/vm/orderAccess_solaris_sparc.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,107 +28,30 @@ #include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.hpp" +// Compiler version last used for testing: solaris studio 12u3 +// Please update this information when this file changes + // Implementation of class OrderAccess. // Assume TSO. -// In solaris_sparc.il -extern "C" void _OrderAccess_acquire(); -extern "C" void _OrderAccess_fence(); - -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } - -#ifdef _GNU_SOURCE - -inline void OrderAccess::acquire() { - __asm__ volatile ("nop" : : :); -} - -inline void OrderAccess::release() { - jint* local_dummy = (jint*)&local_dummy; - __asm__ volatile("stw %%g0, [%0]" : : "r" (local_dummy) : "memory"); -} - -inline void OrderAccess::fence() { - __asm__ volatile ("membar #StoreLoad" : : :); -} - -#else // _GNU_SOURCE - -inline void OrderAccess::acquire() { - _OrderAccess_acquire(); -} - -inline void OrderAccess::release() { - // Avoid hitting the same cache-line from - // different threads. 
- volatile jint local_dummy = 0; -} - -inline void OrderAccess::fence() { - _OrderAccess_fence(); +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); } -#endif // _GNU_SOURCE - -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } -inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } -inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return *p; } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; } +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void 
OrderAccess::release() { compiler_barrier(); } -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } +inline void OrderAccess::fence() { + __asm__ volatile ("membar #StoreLoad" : : : "memory"); +} -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { *(void* volatile *)p = v; fence(); } +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il --- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ // -// Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA // or visit www.oracle.com if you need additional information or have any // questions. -// +// // // Get the raw thread ID from %g7 @@ -35,11 +35,11 @@ // Clear SPARC fprs.FEF DU and DL bits -- // allows the kernel to avoid saving FPU state at context-switch time. // Use for state-transition points (into _thread_blocked) or when - // parking. - + // parking. 
+ .inline _mark_fpu_nosave, 0 .volatile - wr %g0, 0, %fprs + wr %g0, 0, %fprs .nonvolatile .end @@ -85,7 +85,7 @@ // Support for jint Atomic::cmpxchg(jint exchange_value, - // volatile jint* dest, + // volatile jint* dest, // jint compare_value) // // Arguments: @@ -103,8 +103,8 @@ .end - // Support for intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, - // volatile intptr_t* dest, + // Support for intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, + // volatile intptr_t* dest, // intptr_t compare_value) // // 64-bit @@ -124,8 +124,8 @@ .end - // Support for jlong Atomic::cmpxchg(jlong exchange_value, - // volatile jlong* dest, + // Support for jlong Atomic::cmpxchg(jlong exchange_value, + // volatile jlong* dest, // jlong compare_value) // // 32-bit calling conventions @@ -221,27 +221,6 @@ .end - // Support for void OrderAccess::acquire() - // The method is intentionally empty. - // It exists for the sole purpose of generating - // a C/C++ sequence point over which the compiler won't - // reorder code. - - .inline _OrderAccess_acquire,0 - .volatile - .nonvolatile - .end - - - // Support for void OrderAccess::fence() - - .inline _OrderAccess_fence,0 - .volatile - membar #StoreLoad - .nonvolatile - .end - - // Support for void Prefetch::read(void *loc, intx interval) // // Prefetch for several reads. diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp --- a/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,110 +29,35 @@ #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" +// Compiler version last used for testing: solaris studio 12u3 +// Please update this information when this file changes + // Implementation of class OrderAccess. -// For Sun Studio - implementation is in solaris_i486.il. -// For gcc - implementation is just below. -extern "C" void _OrderAccess_acquire(); -extern "C" void _OrderAccess_fence(); - -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { - _OrderAccess_acquire(); - +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); } -inline void OrderAccess::release() { - // Avoid hitting the same cache-line from - // different threads. 
- volatile jint local_dummy = 0; -} +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } + +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } inline void OrderAccess::fence() { if (os::is_MP()) { - _OrderAccess_fence(); +#ifdef AMD64 + __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory"); +#else + __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); +#endif } -} - -#ifdef _GNU_SOURCE - -extern "C" { - inline void _OrderAccess_acquire() { - volatile intptr_t local_dummy; -#ifdef AMD64 - __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory"); -#else - __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory"); -#endif // AMD64 - } - inline void _OrderAccess_fence() { - // Always use locked addl since mfence is sometimes expensive - __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); - } - + compiler_barrier(); } -#endif // GNU_SOURCE - -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } -inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } -inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; 
fence(); } -inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } - -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } - -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store((jlong *)p, (jlong)v); fence(); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { *p = v; fence(); } -inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { *(void* volatile *)p = v; fence(); } +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il --- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA // or visit www.oracle.com if you need additional information or have any // questions. 
-// +// // @@ -34,19 +34,19 @@ // Get the raw thread ID from %gs:0 .inline _raw_thread_id,0 - movl %gs:0, %eax + movl %gs:0, %eax .end // Get current sp .inline _get_current_sp,0 .volatile - movl %esp, %eax + movl %esp, %eax .end // Get current fp .inline _get_current_fp,0 .volatile - movl %ebp, %eax + movl %ebp, %eax .end // Support for os::rdtsc() @@ -76,8 +76,8 @@ xchgl (%ecx), %eax .end - // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, - // volatile jbyte *dest, + // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, + // volatile jbyte *dest, // jbyte compare_value) // An additional bool (os::is_MP()) is passed as the last argument. .inline _Atomic_cmpxchg_byte,4 @@ -93,8 +93,8 @@ 2: .end - // Support for jint Atomic::cmpxchg(jint exchange_value, - // volatile jint *dest, + // Support for jint Atomic::cmpxchg(jint exchange_value, + // volatile jint *dest, // jint compare_value) // An additional bool (os::is_MP()) is passed as the last argument. .inline _Atomic_cmpxchg,4 @@ -141,17 +141,6 @@ fistpll (%eax) .end - // Support for OrderAccess::acquire() - .inline _OrderAccess_acquire,0 - movl 0(%esp), %eax - .end - - // Support for OrderAccess::fence() - .inline _OrderAccess_fence,0 - lock - addl $0, (%esp) - .end - // Support for u2 Bytes::swap_u2(u2 x) .inline _raw_swap_u2,1 movl 0(%esp), %eax diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il --- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ // -// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA // or visit www.oracle.com if you need additional information or have any // questions. 
-// +// // // The argument size of each inline directive is ignored by the compiler @@ -27,19 +27,19 @@ // Get the raw thread ID from %gs:0 .inline _raw_thread_id,0 - movq %fs:0, %rax + movq %fs:0, %rax .end // Get current sp .inline _get_current_sp,0 .volatile - movq %rsp, %rax + movq %rsp, %rax .end // Get current fp .inline _get_current_fp,0 .volatile - movq %rbp, %rax + movq %rbp, %rax .end // Support for os::rdtsc() @@ -77,8 +77,8 @@ movq %rdi, %rax .end - // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, - // volatile jbyte *dest, + // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, + // volatile jbyte *dest, // jbyte compare_value) .inline _Atomic_cmpxchg_byte,3 movb %dl, %al // compare_value @@ -86,8 +86,8 @@ cmpxchgb %dil, (%rsi) .end - // Support for jint Atomic::cmpxchg(jint exchange_value, - // volatile jint *dest, + // Support for jint Atomic::cmpxchg(jint exchange_value, + // volatile jint *dest, // jint compare_value) .inline _Atomic_cmpxchg,3 movl %edx, %eax // compare_value @@ -104,17 +104,6 @@ cmpxchgq %rdi, (%rsi) .end - // Support for OrderAccess::acquire() - .inline _OrderAccess_acquire,0 - movl 0(%rsp), %eax - .end - - // Support for OrderAccess::fence() - .inline _OrderAccess_fence,0 - lock - addl $0, (%rsp) - .end - // Support for u2 Bytes::swap_u2(u2 x) .inline _raw_swap_u2,1 movw %di, %ax diff -r 17efac395638 -r a46e5922bb40 hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp --- a/hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/os_cpu/windows_x86/vm/orderAccess_windows_x86.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,29 +25,39 @@ #ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP #define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP +#include #include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" +// Compiler version last used for testing: Microsoft Visual Studio 2010 +// Please update this information when this file changes + // Implementation of class OrderAccess. -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +inline void compiler_barrier() { + _ReadWriteBarrier(); +} + +// Note that in MSVC, volatile memory accesses are explicitly +// guaranteed to have acquire release semantics (w.r.t. compiler +// reordering) and therefore does not even need a compiler barrier +// for normal acquire release accesses. And all generalized +// bound calls like release_store go through OrderAccess::load +// and OrderAccess::store which do volatile memory accesses. 
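// Illustrative sketch, not part of this patch: the ScopedFence specializations
// that follow rely on the RAII scheme of the generalized orderAccess.hpp -- the
// shared load/store wrappers construct a scoped object whose prefix()/postfix()
// supply the barrier halves around the raw volatile access. Because MSVC
// volatile accesses already carry compiler-level acquire/release semantics,
// most halves can stay empty here. A simplified standalone analogue with
// hypothetical names:
enum MyFenceKind { MY_LOAD_ACQUIRE, MY_RELEASE_STORE };

template <MyFenceKind K>
struct MyScopedFence {
  MyScopedFence()  { prefix();  }   // runs before the wrapped access
  ~MyScopedFence() { postfix(); }   // runs after the wrapped access
  static void prefix();
  static void postfix();
};

// MSVC-style: nothing to do beyond what volatile already guarantees.
template <> inline void MyScopedFence<MY_LOAD_ACQUIRE>::prefix()   { }
template <> inline void MyScopedFence<MY_LOAD_ACQUIRE>::postfix()  { }
template <> inline void MyScopedFence<MY_RELEASE_STORE>::prefix()  { }
template <> inline void MyScopedFence<MY_RELEASE_STORE>::postfix() { }

template <typename T>
inline T my_load_acquire(const volatile T* p) {
  MyScopedFence<MY_LOAD_ACQUIRE> f;   // postfix() runs after the load, at scope exit
  return *p;
}
template <typename T>
inline void my_release_store(volatile T* p, T v) {
  MyScopedFence<MY_RELEASE_STORE> f;  // prefix() runs before the store
  *p = v;
}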
+template<> inline void ScopedFence::postfix() { } +template<> inline void ScopedFence::prefix() { } +template<> inline void ScopedFence::prefix() { } +template<> inline void ScopedFence::postfix() { OrderAccess::fence(); } + +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } inline void OrderAccess::storeload() { fence(); } -inline void OrderAccess::acquire() { -#ifndef AMD64 - __asm { - mov eax, dword ptr [esp]; - } -#endif // !AMD64 -} - -inline void OrderAccess::release() { - // A volatile store has release semantics. - volatile jint local_dummy = 0; -} +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } inline void OrderAccess::fence() { #ifdef AMD64 @@ -59,157 +69,47 @@ } } #endif // AMD64 + compiler_barrier(); } -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } -inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } -inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); } - -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } - -inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } -inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } - -inline void OrderAccess::store_fence(jbyte* p, jbyte v) { -#ifdef AMD64 - *p = v; fence(); -#else +#ifndef AMD64 +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jbyte* p, jbyte v) { __asm { mov edx, p; mov al, v; xchg al, byte ptr [edx]; } -#endif // AMD64 } -inline void OrderAccess::store_fence(jshort* p, jshort v) { -#ifdef AMD64 - *p = v; fence(); 
-#else +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jshort* p, jshort v) { __asm { mov edx, p; mov ax, v; xchg ax, word ptr [edx]; } -#endif // AMD64 } -inline void OrderAccess::store_fence(jint* p, jint v) { -#ifdef AMD64 - *p = v; fence(); -#else +template<> +inline void OrderAccess::specialized_release_store_fence (volatile jint* p, jint v) { __asm { mov edx, p; mov eax, v; xchg eax, dword ptr [edx]; } +} #endif // AMD64 -} - -inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jubyte* p, jubyte v) { store_fence((jbyte*)p, (jbyte)v); } -inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); } -inline void OrderAccess::store_fence(juint* p, juint v) { store_fence((jint*)p, (jint)v); } -inline void OrderAccess::store_fence(julong* p, julong v) { store_fence((jlong*)p, (jlong)v); } -inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } - -inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { -#ifdef AMD64 - *p = v; fence(); -#else - store_fence((jint*)p, (jint)v); -#endif // AMD64 -} -inline void OrderAccess::store_ptr_fence(void** p, void* v) { -#ifdef AMD64 - *p = v; fence(); -#else - store_fence((jint*)p, (jint)v); -#endif // AMD64 +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jfloat* p, jfloat v) { + release_store_fence((volatile jint*)p, jint_cast(v)); } - -// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile. -inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { -#ifdef AMD64 - *p = v; fence(); -#else - __asm { - mov edx, p; - mov al, v; - xchg al, byte ptr [edx]; - } -#endif // AMD64 +template<> +inline void OrderAccess::specialized_release_store_fence(volatile jdouble* p, jdouble v) { + release_store_fence((volatile jlong*)p, jlong_cast(v)); } -inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { -#ifdef AMD64 - *p = v; fence(); -#else - __asm { - mov edx, p; - mov ax, v; - xchg ax, word ptr [edx]; - } -#endif // AMD64 -} - -inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { -#ifdef AMD64 - *p = v; fence(); -#else - __asm { - mov edx, p; - mov eax, v; - xchg eax, dword ptr [edx]; - } -#endif // AMD64 -} - -inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } - -inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); } -inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); } -inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store_fence((volatile jint*)p, (jint)v); } -inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store_fence((volatile jlong*)p, (jlong)v); } -inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); } -inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); } - -inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { -#ifdef AMD64 - *p = v; fence(); -#else - release_store_fence((volatile jint*)p, (jint)v); -#endif // AMD64 -} - -inline void 
OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { -#ifdef AMD64 - *(void* volatile *)p = v; fence(); -#else - release_store_fence((volatile jint*)p, (jint)v); -#endif // AMD64 -} +#define VM_HAS_GENERALIZED_ORDER_ACCESS 1 #endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/c1/c1_LIR.cpp --- a/hotspot/src/share/vm/c1/c1_LIR.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIR.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -142,16 +142,11 @@ #ifndef PRODUCT -void LIR_Address::verify() const { +void LIR_Address::verify0() const { #if defined(SPARC) || defined(PPC) assert(scale() == times_1, "Scaled addressing mode not available on SPARC/PPC and should not be used"); assert(disp() == 0 || index()->is_illegal(), "can't have both"); #endif -#ifdef ARM - assert(disp() == 0 || index()->is_illegal(), "can't have both"); - // Note: offsets higher than 4096 must not be rejected here. They can - // be handled by the back-end or will be rejected if not. -#endif #ifdef _LP64 assert(base()->is_cpu_register(), "wrong base operand"); #ifndef AARCH64 diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/c1/c1_LIR.hpp --- a/hotspot/src/share/vm/c1/c1_LIR.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIR.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_C1_C1_LIR_HPP #define SHARE_VM_C1_C1_LIR_HPP +#include "c1/c1_Defs.hpp" #include "c1/c1_ValueType.hpp" #include "oops/method.hpp" @@ -561,7 +562,13 @@ virtual BasicType type() const { return _type; } virtual void print_value_on(outputStream* out) const PRODUCT_RETURN; - void verify() const PRODUCT_RETURN; + void verify0() const PRODUCT_RETURN; +#if defined(LIR_ADDRESS_PD_VERIFY) && !defined(PRODUCT) + void pd_verify() const; + void verify() const { pd_verify(); } +#else + void verify() const { verify0(); } +#endif static Scale scale(BasicType type); }; @@ -610,7 +617,7 @@ LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); } -#if defined(ARM) +#if defined(ARM32) static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); } static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/c1/c1_LIRGenerator.cpp --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "c1/c1_Defs.hpp" #include "c1/c1_Compilation.hpp" #include "c1/c1_FrameMap.hpp" #include "c1/c1_Instruction.hpp" @@ -49,10 +50,7 @@ #define __ gen()->lir()-> #endif -// TODO: ARM - Use some recognizable constant which still fits architectural constraints -#ifdef ARM -#define PATCHED_ADDR (204) -#else +#ifndef PATCHED_ADDR #define PATCHED_ADDR (max_jint) #endif @@ -1600,24 +1598,9 @@ } assert(addr->is_register(), "must be a register at this point"); -#ifdef ARM - // TODO: ARM - move to platform-dependent code - LIR_Opr tmp = 
FrameMap::R14_opr; - if (VM_Version::supports_movw()) { - __ move((LIR_Opr)card_table_base, tmp); - } else { - __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp); - } - - LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE); - if(((int)ct->byte_map_base & 0xff) == 0) { - __ move(tmp, card_addr); - } else { - LIR_Opr tmp_zero = new_register(T_INT); - __ move(LIR_OprFact::intConst(0), tmp_zero); - __ move(tmp_zero, card_addr); - } -#else // ARM +#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER + CardTableModRef_post_barrier_helper(addr, card_table_base); +#else LIR_Opr tmp = new_pointer_register(); if (TwoOperandLIRForm) { __ move(addr, tmp); @@ -1633,7 +1616,7 @@ new LIR_Address(tmp, load_constant(card_table_base), T_BYTE)); } -#endif // ARM +#endif } @@ -2123,7 +2106,7 @@ } else { #ifdef X86 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); -#elif defined(ARM) +#elif defined(GENERATE_ADDRESS_IS_PREFERRED) addr = generate_address(base_op, index_op, log2_scale, 0, dst_type); #else if (index_op->is_illegal() || log2_scale == 0) { @@ -2177,6 +2160,9 @@ LIR_Opr base_op = base.result(); LIR_Opr index_op = idx.result(); +#ifdef GENERATE_ADDRESS_IS_PREFERRED + LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type()); +#else #ifndef _LP64 if (base_op->type() == T_LONG) { base_op = new_register(T_INT); @@ -2210,6 +2196,7 @@ } LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type()); +#endif // !GENERATE_ADDRESS_IS_PREFERRED __ move(value.result(), addr); } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/c1/c1_LIRGenerator.hpp --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -275,6 +275,9 @@ void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); +#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER + void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base); +#endif static LIR_Opr result_register_for(ValueType* type, bool callee = false); @@ -546,6 +549,10 @@ #ifdef ASSERT virtual void do_Assert (Assert* x); #endif + +#ifdef C1_LIRGENERATOR_MD_HPP +#include C1_LIRGENERATOR_MD_HPP +#endif }; diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/c1/c1_LinearScan.cpp --- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -2123,7 +2123,7 @@ assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register"); assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even"); LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg); -#elif defined(ARM) +#elif defined(ARM32) assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register"); assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register"); assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even"); @@ -2712,7 +2712,7 @@ #ifdef SPARC assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)"); #endif -#ifdef ARM +#ifdef ARM32 assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)"); #endif #ifdef PPC diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/c1/c1_Runtime1.cpp --- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1120,7 +1120,7 @@ #ifdef ARM if((load_klass_or_mirror_patch_id || stub_id == Runtime1::load_appendix_patching_id) && - !VM_Version::supports_movw()) { + nativeMovConstReg_at(copy_buff)->is_pc_relative()) { nmethod* nm = CodeCache::find_nmethod(instr_pc); address addr = NULL; assert(nm != NULL, "invalid nmethod_pc"); diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/classfile/classLoaderData.cpp --- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -902,7 +902,7 @@ } Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() { - Klass* head = (Klass*)_next_klass; + Klass* head = _next_klass; while (head != NULL) { Klass* next = next_klass_in_cldg(head); diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/classfile/classLoaderData.hpp --- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -315,7 +315,7 @@ // An iterator that distributes Klasses to parallel worker threads. 
class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj { - volatile Klass* _next_klass; + Klass* volatile _next_klass; public: ClassLoaderDataGraphKlassIteratorAtomic(); Klass* next_klass(); diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -549,7 +549,7 @@ FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4); } if (ConcGCThreads > 1) { - _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads", + _conc_workers = new YieldingFlexibleWorkGang("CMS Thread", ConcGCThreads, true); if (_conc_workers == NULL) { warning("GC/CMS: _conc_workers allocation failure: " diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -65,7 +65,7 @@ assert(_collector == NULL, "Collector already set"); _collector = collector; - set_name("Concurrent Mark-Sweep GC Thread"); + set_name("CMS Main Thread"); if (os::create_thread(this, os::cgc_thread)) { // An old comment here said: "Priority should be just less diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -61,7 +61,7 @@ create_and_start(); // set name - set_name("G1 Concurrent Refinement Thread#%d", worker_id); + set_name("G1 Refine#%d", worker_id); } void ConcurrentG1RefineThread::initialize() { diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -687,7 +687,7 @@ gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor()); #endif - _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads", + _parallel_workers = new FlexibleWorkGang("G1 Marker", _max_parallel_marking_threads, false, true); if (_parallel_workers == NULL) { vm_exit_during_initialization("Failed necessary allocation."); @@ -3561,6 +3561,15 @@ _termination_start_time_ms = 0.0; #if _MARKING_STATS_ + _aborted = 0; + _aborted_overflow = 0; + _aborted_cm_aborted = 0; + _aborted_yield = 0; + _aborted_timed_out = 0; + _aborted_satb = 0; + _aborted_termination = 0; + _steal_attempts = 0; + _steals = 0; _local_pushes = 0; _local_pops = 0; _local_max_size = 0; @@ -3573,15 +3582,6 @@ _regions_claimed = 0; _objs_found_on_bitmap = 0; _satb_buffers_processed = 0; - _steal_attempts = 0; - _steals = 0; - _aborted = 0; - _aborted_overflow = 0; - _aborted_cm_aborted = 0; - _aborted_yield = 0; - _aborted_timed_out = 0; - _aborted_satb = 0; - _aborted_termination = 0; #endif // _MARKING_STATS_ } @@ -3742,7 +3742,7 @@ gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack", _worker_id, n); 
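An aside on the ClassLoaderDataGraphKlassIteratorAtomic hunk above: the patch moves the qualifier from "volatile Klass*" (pointer to volatile Klass) to "Klass* volatile" (volatile pointer to Klass), which is what a cursor field that other threads update concurrently actually needs. A minimal stand-alone sketch, with invented names and not taken from VM code, of where the qualifier binds:

// Illustration only: the two placements of 'volatile' for a pointer member.
struct Klass { int k; };

struct Iterator {
  volatile Klass* pointee_volatile;   // pointer to a volatile Klass; the pointer itself is ordinary
  Klass* volatile pointer_volatile;   // ordinary Klass, but the pointer variable is volatile,
                                      // as _next_klass is after this change
};

int main() {
  Klass a = { 1 };
  Klass b = { 2 };
  Iterator it = { &a, &a };
  it.pointer_volatile = &b;           // every read or write of the field itself is a volatile access,
                                      // which is the property a concurrently advanced cursor relies on
  return it.pointer_volatile->k;
}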
} - statsOnly( int tmp_size = _cm->mark_stack_size(); + statsOnly( size_t tmp_size = _cm->mark_stack_size(); if (tmp_size > _global_max_size) { _global_max_size = tmp_size; } @@ -3777,7 +3777,7 @@ assert(success, "invariant"); } - statsOnly( int tmp_size = _task_queue->size(); + statsOnly( size_t tmp_size = (size_t)_task_queue->size(); if (tmp_size > _local_max_size) { _local_max_size = tmp_size; } @@ -3934,24 +3934,24 @@ gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", _all_clock_intervals_ms.maximum(), _all_clock_intervals_ms.sum()); - gclog_or_tty->print_cr(" Clock Causes (cum): scanning = %d, marking = %d", + gclog_or_tty->print_cr(" Clock Causes (cum): scanning = " SIZE_FORMAT ", marking = " SIZE_FORMAT, _clock_due_to_scanning, _clock_due_to_marking); - gclog_or_tty->print_cr(" Objects: scanned = %d, found on the bitmap = %d", + gclog_or_tty->print_cr(" Objects: scanned = " SIZE_FORMAT ", found on the bitmap = " SIZE_FORMAT, _objs_scanned, _objs_found_on_bitmap); - gclog_or_tty->print_cr(" Local Queue: pushes = %d, pops = %d, max size = %d", + gclog_or_tty->print_cr(" Local Queue: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, _local_pushes, _local_pops, _local_max_size); - gclog_or_tty->print_cr(" Global Stack: pushes = %d, pops = %d, max size = %d", + gclog_or_tty->print_cr(" Global Stack: pushes = " SIZE_FORMAT ", pops = " SIZE_FORMAT ", max size = " SIZE_FORMAT, _global_pushes, _global_pops, _global_max_size); - gclog_or_tty->print_cr(" transfers to = %d, transfers from = %d", + gclog_or_tty->print_cr(" transfers to = " SIZE_FORMAT ", transfers from = " SIZE_FORMAT, _global_transfers_to,_global_transfers_from); - gclog_or_tty->print_cr(" Regions: claimed = %d", _regions_claimed); - gclog_or_tty->print_cr(" SATB buffers: processed = %d", _satb_buffers_processed); - gclog_or_tty->print_cr(" Steals: attempts = %d, successes = %d", + gclog_or_tty->print_cr(" Regions: claimed = " SIZE_FORMAT, _regions_claimed); + gclog_or_tty->print_cr(" SATB buffers: processed = " SIZE_FORMAT, _satb_buffers_processed); + gclog_or_tty->print_cr(" Steals: attempts = " SIZE_FORMAT ", successes = " SIZE_FORMAT, _steal_attempts, _steals); - gclog_or_tty->print_cr(" Aborted: %d, due to", _aborted); - gclog_or_tty->print_cr(" overflow: %d, global abort: %d, yield: %d", + gclog_or_tty->print_cr(" Aborted: " SIZE_FORMAT ", due to", _aborted); + gclog_or_tty->print_cr(" overflow: " SIZE_FORMAT ", global abort: " SIZE_FORMAT ", yield: " SIZE_FORMAT, _aborted_overflow, _aborted_cm_aborted, _aborted_yield); - gclog_or_tty->print_cr(" time out: %d, SATB: %d, termination: %d", + gclog_or_tty->print_cr(" time out: " SIZE_FORMAT ", SATB: " SIZE_FORMAT ", termination: " SIZE_FORMAT, _aborted_timed_out, _aborted_satb, _aborted_termination); #endif // _MARKING_STATS_ } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -1039,36 +1039,36 @@ NumberSeq _all_clock_intervals_ms; double _interval_start_time_ms; - int _aborted; - int _aborted_overflow; - int _aborted_cm_aborted; - int _aborted_yield; - int _aborted_timed_out; - int _aborted_satb; - int _aborted_termination; + size_t _aborted; + size_t _aborted_overflow; + size_t _aborted_cm_aborted; + size_t _aborted_yield; + size_t _aborted_timed_out; + size_t _aborted_satb; + size_t _aborted_termination; - int _steal_attempts; - int _steals; + size_t _steal_attempts; + size_t _steals; - int _clock_due_to_marking; - int _clock_due_to_scanning; + size_t _clock_due_to_marking; + size_t _clock_due_to_scanning; - int _local_pushes; - int _local_pops; - int _local_max_size; - int _objs_scanned; + size_t _local_pushes; + size_t _local_pops; + size_t _local_max_size; + size_t _objs_scanned; - int _global_pushes; - int _global_pops; - int _global_max_size; + size_t _global_pushes; + size_t _global_pops; + size_t _global_max_size; - int _global_transfers_to; - int _global_transfers_from; + size_t _global_transfers_to; + size_t _global_transfers_from; - int _regions_claimed; - int _objs_found_on_bitmap; + size_t _regions_claimed; + size_t _objs_found_on_bitmap; - int _satb_buffers_processed; + size_t _satb_buffers_processed; #endif // _MARKING_STATS_ // it updates the local fields after this task has claimed diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -252,7 +252,7 @@ assert(success, "invariant"); } - statsOnly( int tmp_size = _task_queue->size(); + statsOnly( size_t tmp_size = (size_t)_task_queue->size(); if (tmp_size > _local_max_size) { _local_max_size = tmp_size; } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ _vtime_accum(0.0), _vtime_mark_accum(0.0) { - set_name("G1 Main Concurrent Mark GC Thread"); + set_name("G1 Main Marker"); create_and_start(); } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -3538,7 +3538,7 @@ r->rem_set()->clear_locked(); } assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty."); - g1h->register_humongous_region_with_in_cset_fast_test(region_idx); + g1h->register_humongous_region_with_cset(region_idx); _candidate_humongous++; } _total_humongous++; @@ -3552,7 +3552,7 @@ void flush_rem_set_entries() { _dcq.flush(); } }; -void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() { +void G1CollectedHeap::register_humongous_regions_with_cset() { if (!G1EagerReclaimHumongousObjects) { g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0); return; @@ -3859,7 +3859,7 @@ g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info); - register_humongous_regions_with_in_cset_fast_test(); + register_humongous_regions_with_cset(); assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); @@ -6077,7 +6077,7 @@ HeapRegion* next = cur->next_in_collection_set(); assert(cur->in_collection_set(), "bad CS"); cur->set_next_in_collection_set(NULL); - cur->set_in_collection_set(false); + clear_in_cset(cur); if (cur->is_young()) { int index = cur->young_index_in_cset(); @@ -6303,7 +6303,7 @@ HeapRegion* next = cur->next_in_collection_set(); assert(cur->in_collection_set(), "bad CS"); cur->set_next_in_collection_set(NULL); - cur->set_in_collection_set(false); + clear_in_cset(cur); cur->set_young_index_in_cset(-1); cur = next; } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -110,6 +110,7 @@ void empty_list(); bool is_empty() { return _length == 0; } uint length() { return _length; } + uint eden_length() { return length() - survivor_length(); } uint survivor_length() { return _survivor_length; } // Currently we do not keep track of the used byte sum for the @@ -119,7 +120,7 @@ // we'll report the more accurate information then. size_t eden_used_bytes() { assert(length() >= survivor_length(), "invariant"); - return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes; + return (size_t) eden_length() * HeapRegion::GrainBytes; } size_t survivor_used_bytes() { return (size_t) survivor_length() * HeapRegion::GrainBytes; @@ -644,23 +645,21 @@ // is considered a candidate for eager reclamation. bool humongous_region_is_candidate(uint index); // Register the given region to be part of the collection set. - inline void register_humongous_region_with_in_cset_fast_test(uint index); + inline void register_humongous_region_with_cset(uint index); // Register regions with humongous objects (actually on the start region) in // the in_cset_fast_test table. 
- void register_humongous_regions_with_in_cset_fast_test(); + void register_humongous_regions_with_cset(); // We register a region with the fast "in collection set" test. We // simply set to true the array slot corresponding to this region. - void register_young_region_with_in_cset_fast_test(HeapRegion* r) { + void register_young_region_with_cset(HeapRegion* r) { _in_cset_fast_test.set_in_young(r->hrm_index()); } - void register_old_region_with_in_cset_fast_test(HeapRegion* r) { + void register_old_region_with_cset(HeapRegion* r) { _in_cset_fast_test.set_in_old(r->hrm_index()); } - - // This is a fast test on whether a reference points into the - // collection set or not. Assume that the reference - // points into the heap. - inline bool in_cset_fast_test(oop obj); + void clear_in_cset(const HeapRegion* hr) { + _in_cset_fast_test.clear(hr); + } void clear_cset_fast_test() { _in_cset_fast_test.clear(); @@ -1245,6 +1244,7 @@ // set. Slow implementation. inline bool obj_in_cs(oop obj); + inline bool is_in_cset(const HeapRegion *hr); inline bool is_in_cset(oop obj); inline bool is_in_cset_or_humongous(const oop obj); diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -234,6 +234,10 @@ return ret; } +bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) { + return _in_cset_fast_test.is_in_cset(hr); +} + bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) { return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj); } @@ -242,7 +246,7 @@ return _in_cset_fast_test.at((HeapWord*)obj); } -void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) { +void G1CollectedHeap::register_humongous_region_with_cset(uint index) { _in_cset_fast_test.set_humongous(index); } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -537,15 +537,12 @@ // This is how many young regions we already have (currently: the survivors). uint base_min_length = recorded_survivor_regions(); - // This is the absolute minimum young length, which ensures that we - // can allocate one eden region in the worst-case. - uint absolute_min_length = base_min_length + 1; - uint desired_min_length = - calculate_young_list_desired_min_length(base_min_length); - if (desired_min_length < absolute_min_length) { - desired_min_length = absolute_min_length; - } - + uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); + // This is the absolute minimum young length. Ensure that we + // will at least have one eden region available for allocation. + uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1); + // If we shrank the young list target it should not shrink below the current size. + desired_min_length = MAX2(desired_min_length, absolute_min_length); // Calculate the absolute and desired max bounds. // We will try our best not to "eat" into the reserve. 
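To make the revised young-list floor in the G1CollectorPolicy hunk above concrete: the minimum is no longer "survivors plus one region" but "survivors plus the currently allocated eden (at least one region)", and a shrinking desired minimum may not drop below that floor. A small stand-alone sketch with invented numbers, purely to show the arithmetic:

#include <algorithm>
#include <cstdio>

// Sketch of the new minimum-young-length rule; inputs are made up for illustration.
static unsigned min_young_length(unsigned survivors, unsigned current_eden, unsigned desired_min) {
  unsigned base_min     = survivors;                              // survivors always stay in the young list
  unsigned absolute_min = base_min + std::max(current_eden, 1u);  // keep current eden, never less than one region
  return std::max(desired_min, absolute_min);                     // never shrink below the current size
}

int main() {
  // e.g. 5 survivor regions, 10 eden regions already allocated, policy asks for 8:
  std::printf("%u\n", min_young_length(5, 10, 8));                // prints 15
  return 0;
}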
@@ -1610,11 +1607,10 @@ assert(hr->is_old(), "the region should be old"); assert(!hr->in_collection_set(), "should not already be in the CSet"); - hr->set_in_collection_set(true); + _g1->register_old_region_with_cset(hr); hr->set_next_in_collection_set(_collection_set); _collection_set = hr; _collection_set_bytes_used_before += hr->used(); - _g1->register_old_region_with_in_cset_fast_test(hr); size_t rs_length = hr->rem_set()->occupied(); _recorded_rs_lengths += rs_length; _old_cset_region_length += 1; @@ -1744,10 +1740,8 @@ _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); assert(!hr->in_collection_set(), "invariant"); - hr->set_in_collection_set(true); - assert( hr->next_in_collection_set() == NULL, "invariant"); - - _g1->register_young_region_with_in_cset_fast_test(hr); + _g1->register_young_region_with_cset(hr); + assert(hr->next_in_collection_set() == NULL, "invariant"); } // Add the region at the RHS of the incremental cset @@ -1925,7 +1919,7 @@ // [Newly Young Regions ++ Survivors from last pause]. uint survivor_region_length = young_list->survivor_length(); - uint eden_region_length = young_list->length() - survivor_region_length; + uint eden_region_length = young_list->eden_length(); init_cset_region_lengths(eden_region_length, survivor_region_length); HeapRegion* hr = young_list->first_survivor_region(); diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/g1InCSetState.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1InCSetState.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1InCSetState.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP +#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/g1BiasedArray.hpp" #include "memory/allocation.hpp" @@ -125,8 +126,10 @@ bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); } bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); } + bool is_in_cset(const HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); } InCSetState at(HeapWord* addr) const { return get_by_address(addr); } void clear() { G1BiasedMappedArray::clear(); } + void clear(const HeapRegion* hr) { return set_by_index(hr->hrm_index(), InCSetState::NotInCSet); } }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ G1StringDedupThread::G1StringDedupThread() : ConcurrentGCThread() { - set_name("String Deduplication Thread"); + set_name("G1 StrDedup"); create_and_start(); } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -162,7 +162,7 @@ "we should have already filtered out humongous regions"); assert(_end == orig_end(), "we should have already filtered out humongous regions"); - assert(!_in_collection_set, + assert(!in_collection_set(), err_msg("Should not clear heap region %u in the collection set", hrm_index())); set_allocation_context(AllocationContext::system()); @@ -262,7 +262,6 @@ _hrm_index(hrm_index), _allocation_context(AllocationContext::system()), _humongous_start_region(NULL), - _in_collection_set(false), _next_in_special_set(NULL), _evacuation_failed(false), _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -236,8 +236,6 @@ // For a humongous region, region in which it starts. HeapRegion* _humongous_start_region; - // True iff the region is in current collection_set. - bool _in_collection_set; // True iff an attempt to evacuate an object in the region failed. bool _evacuation_failed; @@ -487,13 +485,8 @@ return _rem_set; } - // True iff the region is in current collection_set. 
- bool in_collection_set() const { - return _in_collection_set; - } - void set_in_collection_set(bool b) { - _in_collection_set = b; - } + bool in_collection_set() const; + HeapRegion* next_in_collection_set() { assert(in_collection_set(), "should only invoke on member of CS."); assert(_next_in_special_set == NULL || diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -196,4 +196,8 @@ } } +inline bool HeapRegion::in_collection_set() const { + return G1CollectedHeap::heap()->is_in_cset(this); +} + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -53,7 +53,7 @@ guarantee(_time_stamps != NULL, "Sanity"); } set_id(which); - set_name("GC task thread#%d (ParallelGC)", which); + set_name("ParGC Thread#%d", which); } GCTaskThread::~GCTaskThread() { diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -34,6 +34,7 @@ #include "memory/resourceArea.hpp" #include "oops/methodCounters.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/interpreter/interp_masm.hpp --- a/hotspot/src/share/vm/interpreter/interp_masm.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/interpreter/interp_masm.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,25 +27,17 @@ #include "asm/macroAssembler.hpp" -#ifdef TARGET_ARCH_x86 +#if defined INTERP_MASM_MD_HPP +# include INTERP_MASM_MD_HPP +#elif defined TARGET_ARCH_x86 # include "interp_masm_x86.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_sparc +#elif defined TARGET_ARCH_MODEL_sparc # include "interp_masm_sparc.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_zero +#elif defined TARGET_ARCH_MODEL_zero # include "interp_masm_zero.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_arm -# include "interp_masm_arm.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_32 -# include "interp_masm_ppc_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_64 +#elif defined TARGET_ARCH_MODEL_ppc_64 # include "interp_masm_ppc_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_aarch64 +#elif defined TARGET_ARCH_MODEL_aarch64 # include "interp_masm_aarch64.hpp" #endif diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/interpreter/templateTable.hpp --- a/hotspot/src/share/vm/interpreter/templateTable.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/interpreter/templateTable.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -341,28 +341,19 @@ static Template* template_for_wide(Bytecodes::Code code) { Bytecodes::wide_check(code); return &_template_table_wide[code]; } // Platform specifics -#ifdef TARGET_ARCH_MODEL_x86_32 +#if defined TEMPLATETABLE_MD_HPP +# include TEMPLATETABLE_MD_HPP +#elif defined TARGET_ARCH_MODEL_x86_32 # include "templateTable_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 +#elif defined TARGET_ARCH_MODEL_x86_64 # include "templateTable_x86_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_sparc +#elif defined TARGET_ARCH_MODEL_sparc # include "templateTable_sparc.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_zero +#elif defined TARGET_ARCH_MODEL_zero # include "templateTable_zero.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_arm -# include "templateTable_arm.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_32 -# include "templateTable_ppc_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_64 +#elif defined TARGET_ARCH_MODEL_ppc_64 # include "templateTable_ppc_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_aarch64 +#elif defined TARGET_ARCH_MODEL_aarch64 # include "templateTable_aarch64.hpp" #endif diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/memory/generation.hpp --- a/hotspot/src/share/vm/memory/generation.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/memory/generation.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -139,7 +139,7 @@ // GenGrain. 
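The interp_masm.hpp hunk above, together with the matching templateTable.hpp, ad.hpp, optoreg.hpp and stubRoutines.hpp hunks, replaces the per-port #ifdef chains with a single override macro naming the port's header. How that macro gets defined is left to a port's build glue and is not part of this changeset; the stand-alone sketch below only demonstrates the computed-include idiom, with an invented header name and file name:

// Default build:               c++ demo.cpp && ./a.out                                           -> in-tree default
// With the override (assumed): c++ -DTEMPLATETABLE_MD_HPP='"templateTable_myport.hpp"' demo.cpp  -> port-supplied header
#include <cstdio>

#if defined TEMPLATETABLE_MD_HPP
# include TEMPLATETABLE_MD_HPP              // the named header is included verbatim
# define SELECTED "port-supplied header"
#else
# define SELECTED "in-tree default"
#endif

int main() { std::puts(SELECTED); return 0; }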
// Note: on ARM we add 1 bit for card_table_base to be properly aligned // (we expect its low byte to be zero - see implementation of post_barrier) - LogOfGenGrain = 16 ARM_ONLY(+1), + LogOfGenGrain = 16 ARM32_ONLY(+1), GenGrain = 1 << LogOfGenGrain }; diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/memory/sharedHeap.cpp --- a/hotspot/src/share/vm/memory/sharedHeap.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/memory/sharedHeap.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,7 @@ } _sh = this; // ch is static, should be set only once. if (UseConcMarkSweepGC || UseG1GC) { - _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads, + _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads, /* are_GC_task_threads */true, /* are_ConcurrentGC_threads */false); if (_workers == NULL) { diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/opto/ad.hpp --- a/hotspot/src/share/vm/opto/ad.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/opto/ad.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,28 +25,19 @@ #ifndef SHARE_VM_OPTO_AD_HPP #define SHARE_VM_OPTO_AD_HPP -#ifdef TARGET_ARCH_MODEL_x86_32 +#if defined AD_MD_HPP +# include AD_MD_HPP +#elif defined TARGET_ARCH_MODEL_x86_32 # include "adfiles/ad_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 +#elif defined TARGET_ARCH_MODEL_x86_64 # include "adfiles/ad_x86_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_sparc +#elif defined TARGET_ARCH_MODEL_sparc # include "adfiles/ad_sparc.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_zero +#elif defined TARGET_ARCH_MODEL_zero # include "adfiles/ad_zero.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_arm -# include "adfiles/ad_arm.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_32 -# include "adfiles/ad_ppc_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_64 +#elif defined TARGET_ARCH_MODEL_ppc_64 # include "adfiles/ad_ppc_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_aarch64 +#elif defined TARGET_ARCH_MODEL_aarch64 # include "adfiles/ad_aarch64.hpp" #endif diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/opto/chaitin.cpp --- a/hotspot/src/share/vm/opto/chaitin.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/opto/chaitin.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -847,7 +847,7 @@ case Op_RegD: lrg.set_num_regs(2); // Define platform specific register pressure -#if defined(SPARC) || defined(ARM) +#if defined(SPARC) || defined(ARM32) lrg.set_reg_pressure(2); #elif defined(IA32) if( ireg == Op_RegL ) { diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/opto/optoreg.hpp --- a/hotspot/src/share/vm/opto/optoreg.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/opto/optoreg.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,28 +27,19 @@ // AdGlobals contains c2 specific register handling code as specified // in the .ad files. -#ifdef TARGET_ARCH_MODEL_x86_32 +#if defined ADGLOBALS_MD_HPP +# include ADGLOBALS_MD_HPP +#elif defined TARGET_ARCH_MODEL_x86_32 # include "adfiles/adGlobals_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 +#elif defined TARGET_ARCH_MODEL_x86_64 # include "adfiles/adGlobals_x86_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_sparc +#elif defined TARGET_ARCH_MODEL_sparc # include "adfiles/adGlobals_sparc.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_zero +#elif defined TARGET_ARCH_MODEL_zero # include "adfiles/adGlobals_zero.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_arm -# include "adfiles/adGlobals_arm.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_32 -# include "adfiles/adGlobals_ppc_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_64 +#elif defined TARGET_ARCH_MODEL_ppc_64 # include "adfiles/adGlobals_ppc_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_aarch64 +#elif defined TARGET_ARCH_MODEL_aarch64 # include "adfiles/adGlobals_aarch64.hpp" #endif diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/prims/whitebox.cpp --- a/hotspot/src/share/vm/prims/whitebox.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/prims/whitebox.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1133,6 +1133,75 @@ VMThread::execute(&force_safepoint_op); WB_END +template +static bool GetMethodOption(JavaThread* thread, JNIEnv* env, jobject method, jstring name, T* value) { + assert(value != NULL, "sanity"); + if (method == NULL || name == NULL) { + return false; + } + jmethodID jmid = reflected_method_to_jmid(thread, env, method); + CHECK_JNI_EXCEPTION_(env, false); + methodHandle mh(thread, Method::checked_resolve_jmethod_id(jmid)); + // can't be in VM when we call JNI + ThreadToNativeFromVM ttnfv(thread); + const char* flag_name = env->GetStringUTFChars(name, NULL); + bool result = CompilerOracle::has_option_value(mh, flag_name, *value); + env->ReleaseStringUTFChars(name, flag_name); + return result; +} + +WB_ENTRY(jobject, WB_GetMethodBooleaneOption(JNIEnv* env, jobject wb, jobject method, jstring name)) + bool result; + if (GetMethodOption (thread, env, method, name, &result)) { + // can't be in VM when we call JNI + ThreadToNativeFromVM ttnfv(thread); + return booleanBox(thread, env, result); + } + return NULL; +WB_END + +WB_ENTRY(jobject, WB_GetMethodIntxOption(JNIEnv* env, jobject wb, jobject method, jstring name)) + intx result; + if (GetMethodOption (thread, env, method, name, &result)) { + // can't be in VM when we call JNI + ThreadToNativeFromVM ttnfv(thread); + return longBox(thread, env, result); + } + return NULL; +WB_END + +WB_ENTRY(jobject, 
WB_GetMethodUintxOption(JNIEnv* env, jobject wb, jobject method, jstring name)) + uintx result; + if (GetMethodOption (thread, env, method, name, &result)) { + // can't be in VM when we call JNI + ThreadToNativeFromVM ttnfv(thread); + return longBox(thread, env, result); + } + return NULL; +WB_END + +WB_ENTRY(jobject, WB_GetMethodDoubleOption(JNIEnv* env, jobject wb, jobject method, jstring name)) + double result; + if (GetMethodOption (thread, env, method, name, &result)) { + // can't be in VM when we call JNI + ThreadToNativeFromVM ttnfv(thread); + return doubleBox(thread, env, result); + } + return NULL; +WB_END + +WB_ENTRY(jobject, WB_GetMethodStringOption(JNIEnv* env, jobject wb, jobject method, jstring name)) + ccstr ccstrResult; + if (GetMethodOption (thread, env, method, name, &ccstrResult)) { + // can't be in VM when we call JNI + ThreadToNativeFromVM ttnfv(thread); + jstring result = env->NewStringUTF(ccstrResult); + CHECK_JNI_EXCEPTION_(env, NULL); + return result; + } + return NULL; +WB_END + //Some convenience methods to deal with objects from java int WhiteBox::offset_for_field(const char* field_name, oop object, Symbol* signature_symbol) { @@ -1333,6 +1402,21 @@ {CC"assertMatchingSafepointCalls", CC"(ZZ)V", (void*)&WB_AssertMatchingSafepointCalls }, {CC"isMonitorInflated", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated }, {CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint }, + {CC"getMethodBooleanOption", + CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Boolean;", + (void*)&WB_GetMethodBooleaneOption}, + {CC"getMethodIntxOption", + CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Long;", + (void*)&WB_GetMethodIntxOption}, + {CC"getMethodUintxOption", + CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Long;", + (void*)&WB_GetMethodUintxOption}, + {CC"getMethodDoubleOption", + CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Double;", + (void*)&WB_GetMethodDoubleOption}, + {CC"getMethodStringOption", + CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/String;", + (void*)&WB_GetMethodStringOption}, }; #undef CC diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/runtime/arguments.cpp --- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -321,6 +321,8 @@ { "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) }, #endif // ZERO { "UseCompilerSafepoints", JDK_Version::jdk(9), JDK_Version::jdk(10) }, + { "AdaptiveSizePausePolicy", JDK_Version::jdk(9), JDK_Version::jdk(10) }, + { "ParallelGCRetainPLAB", JDK_Version::jdk(9), JDK_Version::jdk(10) }, { NULL, JDK_Version(0), JDK_Version(0) } }; diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/runtime/globals.hpp --- a/hotspot/src/share/vm/runtime/globals.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1080,9 +1080,6 @@ notproduct(bool, ProfilerCheckIntervals, false, \ "Collect and print information on spacing of profiler ticks") \ \ - develop(bool, PrintJVMWarnings, false, \ - "Print warnings for unimplemented JVM functions") \ - \ product(bool, PrintWarnings, true, \ "Print JVM warnings to output stream") \ \ @@ -1207,10 +1204,6 @@ "Use pthread-based instead of libthread-based synchronization " \ "(SPARC only)") \ \ - product(bool, AdjustConcurrency, false, \ - "Call thr_setconcurrency at thread creation time to avoid " \ - "LWP starvation on MP 
systems (for Solaris Only)") \ - \ product(bool, ReduceSignalUsage, false, \ "Reduce the use of OS signals in Java and/or the VM") \ \ @@ -1557,11 +1550,6 @@ product(uintx, ParallelGCBufferWastePct, 10, \ "Wasted fraction of parallel allocation buffer") \ \ - diagnostic(bool, ParallelGCRetainPLAB, false, \ - "Retain parallel allocation buffers across scavenges; " \ - "it is disabled because this currently conflicts with " \ - "parallel card scanning under certain conditions.") \ - \ product(uintx, TargetPLABWastePct, 10, \ "Target wasted space in last buffer as percent of overall " \ "allocation") \ @@ -2101,9 +2089,6 @@ product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ "Policy for changing generation size for throughput goals") \ \ - product(uintx, AdaptiveSizePausePolicy, 0, \ - "Policy for changing generation size for pause goals") \ - \ develop(bool, PSAdjustTenuredGenForMinorPause, false, \ "Adjust tenured generation to achieve a minor pause goal") \ \ diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/runtime/orderAccess.hpp --- a/hotspot/src/share/vm/runtime/orderAccess.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/runtime/orderAccess.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,11 +29,7 @@ // Memory Access Ordering Model // -// This interface is based on the JSR-133 Cookbook for Compiler Writers -// and on the IA64 memory model. It is the dynamic equivalent of the -// C/C++ volatile specifier. I.e., volatility restricts compile-time -// memory access reordering in a way similar to what we want to occur -// at runtime. +// This interface is based on the JSR-133 Cookbook for Compiler Writers. // // In the following, the terms 'previous', 'subsequent', 'before', // 'after', 'preceding' and 'succeeding' refer to program order. The @@ -41,7 +37,6 @@ // relative to program order, while 'up' and 'above' refer to backward // motion. // -// // We define four primitive memory barrier operations. // // LoadLoad: Load1(s); LoadLoad; Load2 @@ -69,86 +64,88 @@ // operations. Stores before Store1 may *not* float below Load2 and any // subsequent load operations. // +// We define two further barriers: acquire and release. // -// We define two further operations, 'release' and 'acquire'. They are -// mirror images of each other. +// Conceptually, acquire/release semantics form unidirectional and +// asynchronous barriers w.r.t. a synchronizing load(X) and store(X) pair. +// They should always be used in pairs to publish (release store) and +// access (load acquire) some implicitly understood shared data between +// threads in a relatively cheap fashion not requiring storeload. If not +// used in such a pair, it is advised to use a membar instead: +// acquire/release only make sense as pairs. +// +// T1: access_shared_data +// T1: ]release +// T1: (...) +// T1: store(X) // -// Execution by a processor of release makes the effect of all memory -// accesses issued by it previous to the release visible to all -// processors *before* the release completes. The effect of subsequent -// memory accesses issued by it *may* be made visible *before* the -// release. I.e., subsequent memory accesses may float above the -// release, but prior ones may not float below it. 
+// T2: load(X) +// T2: (...) +// T2: acquire[ +// T2: access_shared_data +// +// It is guaranteed that if T2: load(X) synchronizes with (observes the +// value written by) T1: store(X), then the memory accesses before the T1: +// ]release happen before the memory accesses after the T2: acquire[. +// +// Total Store Order (TSO) machines can be seen as machines issuing a +// release store for each store and a load acquire for each load. Therefore +// there is an inherent resemblence between TSO and acquire/release +// semantics. TSO can be seen as an abstract machine where loads are +// executed immediately when encountered (hence loadload reordering not +// happening) but enqueues stores in a FIFO queue +// for asynchronous serialization (neither storestore or loadstore +// reordering happening). The only reordering happening is storeload due to +// the queue asynchronously serializing stores (yet in order). // -// Execution by a processor of acquire makes the effect of all memory -// accesses issued by it subsequent to the acquire visible to all -// processors *after* the acquire completes. The effect of prior memory -// accesses issued by it *may* be made visible *after* the acquire. -// I.e., prior memory accesses may float below the acquire, but -// subsequent ones may not float above it. +// Acquire/release semantics essentially exploits this asynchronicity: when +// the load(X) acquire[ observes the store of ]release store(X), the +// accesses before the release must have happened before the accesses after +// acquire. +// +// The API offers both stand-alone acquire() and release() as well as bound +// load_acquire() and release_store(). It is guaranteed that these are +// semantically equivalent w.r.t. the defined model. However, since +// stand-alone acquire()/release() does not know which previous +// load/subsequent store is considered the synchronizing load/store, they +// may be more conservative in implementations. We advise using the bound +// variants whenever possible. +// +// Finally, we define a "fence" operation, as a bidirectional barrier. +// It guarantees that any memory access preceding the fence is not +// reordered w.r.t. any memory accesses subsequent to the fence in program +// order. This may be used to prevent sequences of loads from floating up +// above sequences of stores. // -// Finally, we define a 'fence' operation, which conceptually is a -// release combined with an acquire. In the real world these operations -// require one or more machine instructions which can float above and -// below the release or acquire, so we usually can't just issue the -// release-acquire back-to-back. All machines we know of implement some -// sort of memory fence instruction. +// The following table shows the implementations on some architectures: +// +// Constraint x86 sparc TSO ppc +// --------------------------------------------------------------------------- +// fence LoadStore | lock membar #StoreLoad sync +// StoreStore | addl 0,(sp) +// LoadLoad | +// StoreLoad +// +// release LoadStore | lwsync +// StoreStore +// +// acquire LoadLoad | lwsync +// LoadStore +// +// release_store lwsync +// +// +// release_store_fence xchg lwsync +// membar #StoreLoad +// sync // // -// The standalone implementations of release and acquire need an associated -// dummy volatile store or load respectively. To avoid redundant operations, -// we can define the composite operators: 'release_store', 'store_fence' and -// 'load_acquire'. 
Here's a summary of the machine instructions corresponding -// to each operation. -// -// sparc RMO ia64 x86 -// --------------------------------------------------------------------- -// fence membar #LoadStore | mf lock addl 0,(sp) -// #StoreStore | -// #LoadLoad | -// #StoreLoad -// -// release membar #LoadStore | st.rel [sp]=r0 movl $0, -// #StoreStore -// st %g0,[] -// -// acquire ld [%sp],%g0 ld.acq =[sp] movl (sp), -// membar #LoadLoad | -// #LoadStore -// -// release_store membar #LoadStore | st.rel -// #StoreStore -// st +// load_acquire +// lwsync // -// store_fence st st lock xchg -// fence mf -// -// load_acquire ld ld.acq -// membar #LoadLoad | -// #LoadStore -// -// Using only release_store and load_acquire, we can implement the -// following ordered sequences. -// -// 1. load, load == load_acquire, load -// or load_acquire, load_acquire -// 2. load, store == load, release_store -// or load_acquire, store -// or load_acquire, release_store -// 3. store, store == store, release_store -// or release_store, release_store -// -// These require no membar instructions for sparc-TSO and no extra -// instructions for ia64. -// -// Ordering a load relative to preceding stores requires a store_fence, +// Ordering a load relative to preceding stores requires a StoreLoad, // which implies a membar #StoreLoad between the store and load under -// sparc-TSO. A fence is required by ia64. On x86, we use locked xchg. -// -// 4. store, load == store_fence, load -// -// Use store_fence to make sure all stores done in an 'interesting' -// region are made visible prior to both subsequent loads and stores. +// sparc-TSO. On x86, we use explicitly locked add. // // Conventional usage is to issue a load_acquire for ordered loads. Use // release_store for ordered stores when you care only that prior stores @@ -157,27 +154,19 @@ // release_store_fence to update values like the thread state, where we // don't want the current thread to continue until all our prior memory // accesses (including the new thread state) are visible to other threads. -// +// This is equivalent to the volatile semantics of the Java Memory Model. // -// C++ Volatility +// C++ Volatile Semantics // -// C++ guarantees ordering at operations termed 'sequence points' (defined -// to be volatile accesses and calls to library I/O functions). 'Side -// effects' (defined as volatile accesses, calls to library I/O functions -// and object modification) previous to a sequence point must be visible -// at that sequence point. See the C++ standard, section 1.9, titled -// "Program Execution". This means that all barrier implementations, -// including standalone loadload, storestore, loadstore, storeload, acquire -// and release must include a sequence point, usually via a volatile memory -// access. Other ways to guarantee a sequence point are, e.g., use of -// indirect calls and linux's __asm__ volatile. -// Note: as of 6973570, we have replaced the originally static "dummy" field -// (see above) by a volatile store to the stack. All of the versions of the -// compilers that we currently use (SunStudio, gcc and VC++) respect the -// semantics of volatile here. If you build HotSpot using other -// compilers, you may need to verify that no compiler reordering occurs -// across the sequence point represented by the volatile access. -// +// C++ volatile semantics prevent compiler re-ordering between +// volatile memory accesses. However, reordering between non-volatile +// and volatile memory accesses is in general undefined. 
For compiler +// reordering constraints taking non-volatile memory accesses into +// consideration, a compiler barrier has to be used instead. Some +// compiler implementations may choose to enforce additional +// constraints beyond those required by the language. Note also that +// both volatile semantics and compiler barrier do not prevent +// hardware reordering. // // os::is_MP Considered Redundant // @@ -240,8 +229,32 @@ // order. If their implementations change such that these assumptions // are violated, a whole lot of code will break. +enum ScopedFenceType { + X_ACQUIRE + , RELEASE_X + , RELEASE_X_FENCE +}; + +template +class ScopedFenceGeneral: public StackObj { + public: + void prefix() {} + void postfix() {} +}; + +template +class ScopedFence : public ScopedFenceGeneral { + void *const _field; + public: + ScopedFence(void *const field) : _field(field) { prefix(); } + ~ScopedFence() { postfix(); } + void prefix() { ScopedFenceGeneral::prefix(); } + void postfix() { ScopedFenceGeneral::postfix(); } +}; + class OrderAccess : AllStatic { public: + // barriers static void loadload(); static void storestore(); static void loadstore(); @@ -280,20 +293,6 @@ static void release_store_ptr(volatile intptr_t* p, intptr_t v); static void release_store_ptr(volatile void* p, void* v); - static void store_fence(jbyte* p, jbyte v); - static void store_fence(jshort* p, jshort v); - static void store_fence(jint* p, jint v); - static void store_fence(jlong* p, jlong v); - static void store_fence(jubyte* p, jubyte v); - static void store_fence(jushort* p, jushort v); - static void store_fence(juint* p, juint v); - static void store_fence(julong* p, julong v); - static void store_fence(jfloat* p, jfloat v); - static void store_fence(jdouble* p, jdouble v); - - static void store_ptr_fence(intptr_t* p, intptr_t v); - static void store_ptr_fence(void** p, void* v); - static void release_store_fence(volatile jbyte* p, jbyte v); static void release_store_fence(volatile jshort* p, jshort v); static void release_store_fence(volatile jint* p, jint v); @@ -313,6 +312,47 @@ // routine if it exists, It should only be used by platforms that // don't have another way to do the inline assembly. static void StubRoutines_fence(); + + // Give platforms a variation point to specialize. + template static T specialized_load_acquire (volatile T* p ); + template static void specialized_release_store (volatile T* p, T v); + template static void specialized_release_store_fence(volatile T* p, T v); + + template + static void ordered_store(volatile FieldType* p, FieldType v); + + template + static FieldType ordered_load(volatile FieldType* p); + + static void store(volatile jbyte* p, jbyte v); + static void store(volatile jshort* p, jshort v); + static void store(volatile jint* p, jint v); + static void store(volatile jlong* p, jlong v); + static void store(volatile jdouble* p, jdouble v); + static void store(volatile jfloat* p, jfloat v); + + static jbyte load (volatile jbyte* p); + static jshort load (volatile jshort* p); + static jint load (volatile jint* p); + static jlong load (volatile jlong* p); + static jdouble load (volatile jdouble* p); + static jfloat load (volatile jfloat* p); + + // The following store_fence methods are deprecated and will be removed + // when all repos conform to the new generalized OrderAccess. 
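To make the T1/T2 pairing described in the rewritten comment above concrete, here is a minimal stand-alone publish/consume example. C++11 atomics stand in for OrderAccess::release_store and OrderAccess::load_acquire; this is an approximation for illustration, not VM code:

#include <atomic>
#include <cassert>
#include <thread>

static int              shared_data = 0;   // the "access_shared_data" part
static std::atomic<int> ready{0};          // the synchronizing variable X

static void t1_publish() {
  shared_data = 42;                                    // T1: access_shared_data
  ready.store(1, std::memory_order_release);           // T1: ]release, then store(X)
}

static void t2_consume() {
  while (ready.load(std::memory_order_acquire) == 0) { // T2: load(X), then acquire[
    // spin until the releasing store is observed
  }
  assert(shared_data == 42);                           // T1's earlier write is guaranteed visible
}

int main() {
  std::thread a(t1_publish);
  std::thread b(t2_consume);
  a.join();
  b.join();
  return 0;
}

The guarantee exercised here is the one stated above: once the consumer's acquiring load observes the releasing store of X, every memory access the publisher made before its release is visible after the consumer's acquire.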
+ static void store_fence(jbyte* p, jbyte v); + static void store_fence(jshort* p, jshort v); + static void store_fence(jint* p, jint v); + static void store_fence(jlong* p, jlong v); + static void store_fence(jubyte* p, jubyte v); + static void store_fence(jushort* p, jushort v); + static void store_fence(juint* p, juint v); + static void store_fence(julong* p, julong v); + static void store_fence(jfloat* p, jfloat v); + static void store_fence(jdouble* p, jdouble v); + + static void store_ptr_fence(intptr_t* p, intptr_t v); + static void store_ptr_fence(void** p, void* v); }; #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/runtime/orderAccess.inline.hpp --- a/hotspot/src/share/vm/runtime/orderAccess.inline.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/runtime/orderAccess.inline.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,6 +26,7 @@ #ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP #define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP +#include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.hpp" // Linux @@ -74,4 +75,92 @@ # include "orderAccess_bsd_zero.inline.hpp" #endif +#ifdef VM_HAS_GENERALIZED_ORDER_ACCESS + +template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::acquire(); } +template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } +template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } +template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::fence(); } + + +template +inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) { + ScopedFence f((void*)p); + store(p, v); +} + +template +inline FieldType OrderAccess::ordered_load(volatile FieldType* p) { + ScopedFence f((void*)p); + return load(p); +} + +inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return specialized_load_acquire(p); } +inline jshort OrderAccess::load_acquire(volatile jshort* p) { return specialized_load_acquire(p); } +inline jint OrderAccess::load_acquire(volatile jint* p) { return specialized_load_acquire(p); } +inline jlong OrderAccess::load_acquire(volatile jlong* p) { return specialized_load_acquire(p); } +inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return specialized_load_acquire(p); } +inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return specialized_load_acquire(p); } +inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return (jubyte) specialized_load_acquire((volatile jbyte*)p); } +inline jushort OrderAccess::load_acquire(volatile jushort* p) { return (jushort)specialized_load_acquire((volatile jshort*)p); } +inline juint OrderAccess::load_acquire(volatile juint* p) { return (juint) specialized_load_acquire((volatile jint*)p); } +inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong) specialized_load_acquire((volatile jlong*)p); } + +inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)specialized_load_acquire(p); } +inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*)specialized_load_acquire((volatile intptr_t*)p); } +inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return 
(void*)specialized_load_acquire((volatile intptr_t*)p); } + +inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store(volatile jshort* p, jshort v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store(volatile jint* p, jint v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store(volatile jlong* p, jlong v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { specialized_release_store((volatile jbyte*) p, (jbyte) v); } +inline void OrderAccess::release_store(volatile jushort* p, jushort v) { specialized_release_store((volatile jshort*)p, (jshort)v); } +inline void OrderAccess::release_store(volatile juint* p, juint v) { specialized_release_store((volatile jint*) p, (jint) v); } +inline void OrderAccess::release_store(volatile julong* p, julong v) { specialized_release_store((volatile jlong*) p, (jlong) v); } + +inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { specialized_release_store(p, v); } +inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { specialized_release_store((volatile intptr_t*)p, (intptr_t)v); } + +inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { specialized_release_store_fence((volatile jbyte*) p, (jbyte) v); } +inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { specialized_release_store_fence((volatile jshort*)p, (jshort)v); } +inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { specialized_release_store_fence((volatile jint*) p, (jint) v); } +inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { specialized_release_store_fence((volatile jlong*) p, (jlong) v); } + +inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { specialized_release_store_fence(p, v); } +inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { specialized_release_store_fence((volatile intptr_t*)p, (intptr_t)v); } + +// The following methods can be specialized using simple template specialization +// in the platform specific files for optimization purposes. Otherwise the +// generalized variant is used. 
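As the comment above notes, these three specialized_* hooks are the per-platform variation points; everything else funnels through them. Purely for illustration, a hypothetical x86-style platform file could collapse release_store_fence into one atomic exchange, since a locked xchg already carries full-fence semantics (this sketch is not part of the patch, and real platform files may differ):

// Hypothetical content of an orderAccess_<os>_x86.inline.hpp, for illustration only.
template<>
inline void OrderAccess::specialized_release_store_fence<jint>(volatile jint* p, jint v) {
  __asm__ volatile ("xchgl (%2),%0"      // exchange is implicitly locked: store + full fence
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

Types left unspecialized keep falling back to the generalized templates defined immediately below.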
+template inline T OrderAccess::specialized_load_acquire (volatile T* p) { return ordered_load(p); } +template inline void OrderAccess::specialized_release_store (volatile T* p, T v) { ordered_store(p, v); } +template inline void OrderAccess::specialized_release_store_fence(volatile T* p, T v) { ordered_store(p, v); } + +// Generalized atomic volatile accesses valid in OrderAccess +// All other types can be expressed in terms of these. +inline void OrderAccess::store(volatile jbyte* p, jbyte v) { *p = v; } +inline void OrderAccess::store(volatile jshort* p, jshort v) { *p = v; } +inline void OrderAccess::store(volatile jint* p, jint v) { *p = v; } +inline void OrderAccess::store(volatile jlong* p, jlong v) { Atomic::store(v, p); } +inline void OrderAccess::store(volatile jdouble* p, jdouble v) { Atomic::store(jlong_cast(v), (volatile jlong*)p); } +inline void OrderAccess::store(volatile jfloat* p, jfloat v) { *p = v; } + +inline jbyte OrderAccess::load(volatile jbyte* p) { return *p; } +inline jshort OrderAccess::load(volatile jshort* p) { return *p; } +inline jint OrderAccess::load(volatile jint* p) { return *p; } +inline jlong OrderAccess::load(volatile jlong* p) { return Atomic::load(p); } +inline jdouble OrderAccess::load(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); } +inline jfloat OrderAccess::load(volatile jfloat* p) { return *p; } + +#endif // VM_HAS_GENERALIZED_ORDER_ACCESS + #endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/runtime/stubRoutines.hpp --- a/hotspot/src/share/vm/runtime/stubRoutines.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -84,32 +84,22 @@ // Dependencies friend class StubGenerator; -#ifdef TARGET_ARCH_MODEL_x86_32 +#if defined STUBROUTINES_MD_HPP +# include STUBROUTINES_MD_HPP +#elif defined TARGET_ARCH_MODEL_x86_32 # include "stubRoutines_x86_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_x86_64 +#elif defined TARGET_ARCH_MODEL_x86_64 # include "stubRoutines_x86_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_sparc +#elif defined TARGET_ARCH_MODEL_sparc # include "stubRoutines_sparc.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_zero +#elif defined TARGET_ARCH_MODEL_zero # include "stubRoutines_zero.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_arm -# include "stubRoutines_arm.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_32 -# include "stubRoutines_ppc_32.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_ppc_64 +#elif defined TARGET_ARCH_MODEL_ppc_64 # include "stubRoutines_ppc_64.hpp" -#endif -#ifdef TARGET_ARCH_MODEL_aarch64 +#elif defined TARGET_ARCH_MODEL_aarch64 # include "stubRoutines_aarch64.hpp" #endif - static jint _verify_oop_count; static address _verify_oop_subroutine_entry; diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/runtime/vm_version.cpp --- a/hotspot/src/share/vm/runtime/vm_version.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/runtime/vm_version.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -187,18 +187,18 @@ AIX_ONLY("aix") \ BSD_ONLY("bsd") +#ifndef CPU #ifdef ZERO #define CPU ZERO_LIBARCH #else #define CPU IA32_ONLY("x86") \ IA64_ONLY("ia64") \ AMD64_ONLY("amd64") \ - ARM_ONLY("arm") \ - PPC32_ONLY("ppc") \ PPC64_ONLY("ppc64") \ AARCH64_ONLY("aarch64") \ SPARC_ONLY("sparc") #endif // ZERO +#endif const char *Abstract_VM_Version::vm_platform_string() { return OS "-" CPU; @@ -251,12 +251,6 @@ #ifndef FLOAT_ARCH #if defined(__SOFTFP__) #define FLOAT_ARCH_STR "-sflt" - #elif defined(E500V2) - #define FLOAT_ARCH_STR "-e500v2" - #elif defined(ARM) - #define FLOAT_ARCH_STR "-vfp" - #elif defined(PPC32) - #define FLOAT_ARCH_STR "-hflt" #else #define FLOAT_ARCH_STR "" #endif diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/services/mallocSiteTable.cpp --- a/hotspot/src/share/vm/services/mallocSiteTable.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/services/mallocSiteTable.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -135,8 +135,7 @@ */ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx) { - int index = hash_to_index(key.hash()); - assert(index >= 0, err_msg("Negative index %d", index)); + unsigned int index = hash_to_index(key.hash()); *bucket_idx = (size_t)index; *pos_idx = 0; diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/services/mallocSiteTable.hpp --- a/hotspot/src/share/vm/services/mallocSiteTable.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/services/mallocSiteTable.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -238,8 +238,7 @@ static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx); static bool walk(MallocSiteWalker* walker); - static inline int hash_to_index(int hash) { - hash = (hash > 0) ? hash : (-hash); + static inline unsigned int hash_to_index(unsigned int hash) { return (hash % table_size); } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -220,7 +220,7 @@ #define DEBUG_EXCEPTION ::abort(); -#ifdef ARM +#ifdef ARM32 #ifdef SOLARIS #define BREAKPOINT __asm__ volatile (".long 0xe1200070") #else diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp --- a/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,7 +33,9 @@ # include +#define __USE_LEGACY_PROTOTYPES__ # include +#undef __USE_LEGACY_PROTOTYPES__ # include # include // for bsd'isms # include diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/utilities/macros.hpp --- a/hotspot/src/share/vm/utilities/macros.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/utilities/macros.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -392,7 +392,6 @@ #define NOT_E500V2(code) code #endif - #ifdef ARM #define ARM_ONLY(code) code #define NOT_ARM(code) @@ -401,6 +400,14 @@ #define NOT_ARM(code) code #endif +#ifdef ARM32 +#define ARM32_ONLY(code) code +#define NOT_ARM32(code) +#else +#define ARM32_ONLY(code) +#define NOT_ARM32(code) code +#endif + #ifdef AARCH64 #define AARCH64_ONLY(code) code #define NOT_AARCH64(code) diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/utilities/nativeCallStack.cpp --- a/hotspot/src/share/vm/utilities/nativeCallStack.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/utilities/nativeCallStack.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -55,6 +55,7 @@ for (; index < NMT_TrackingStackDepth; index ++) { _stack[index] = NULL; } + _hash_value = 0; } // number of stack frames captured @@ -69,19 +70,16 @@ } // Hash code. Any better algorithm? -int NativeCallStack::hash() const { - long hash_val = _hash_value; +unsigned int NativeCallStack::hash() const { + uintptr_t hash_val = _hash_value; if (hash_val == 0) { - long pc; - int index; - for (index = 0; index < NMT_TrackingStackDepth; index ++) { - pc = (long)_stack[index]; - if (pc == 0) break; - hash_val += pc; + for (int index = 0; index < NMT_TrackingStackDepth; index++) { + if (_stack[index] == NULL) break; + hash_val += (uintptr_t)_stack[index]; } NativeCallStack* p = const_cast(this); - p->_hash_value = (int)(hash_val & 0xFFFFFFFF); + p->_hash_value = (unsigned int)(hash_val & 0xFFFFFFFF); } return _hash_value; } diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/utilities/nativeCallStack.hpp --- a/hotspot/src/share/vm/utilities/nativeCallStack.hpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/utilities/nativeCallStack.hpp Wed Jul 05 20:25:13 2017 +0200 @@ -56,8 +56,8 @@ static const NativeCallStack EMPTY_STACK; private: - address _stack[NMT_TrackingStackDepth]; - int _hash_value; + address _stack[NMT_TrackingStackDepth]; + unsigned int _hash_value; public: NativeCallStack(int toSkip = 0, bool fillStack = false); @@ -89,7 +89,7 @@ } // Hash code. Any better algorithm? 
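The mallocSiteTable and NativeCallStack hunks in this area all make the same move: hash values and bucket indices become unsigned. The old signed path negated negative hashes, which overflows for INT_MIN and needed an assert that the resulting index was non-negative; with unsigned arithmetic the modulo is always in range. A small stand-alone illustration of the difference (not HotSpot code; the table size is chosen arbitrarily):

#include <cstdio>

int main() {
  const unsigned int table_size = 511;   // hypothetical bucket count
  int hash = -12345;                     // hashes were plain ints before the change

  // Old approach: flip the sign, then take a signed modulo. Negating INT_MIN
  // overflows (undefined behaviour), so the "index >= 0" assert was load-bearing.
  int old_index = ((hash > 0) ? hash : -hash) % (int)table_size;

  // New approach: treat the hash as unsigned; the result is always in range.
  unsigned int new_index = (unsigned int)hash % table_size;

  printf("old=%d new=%u\n", old_index, new_index);
  return 0;
}

The retyped hash() declaration documented by the comment above follows in the next hunk.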
- int hash() const; + unsigned int hash() const; void print_on(outputStream* out) const; void print_on(outputStream* out, int indent) const; diff -r 17efac395638 -r a46e5922bb40 hotspot/src/share/vm/utilities/workgroup.cpp --- a/hotspot/src/share/vm/utilities/workgroup.cpp Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/src/share/vm/utilities/workgroup.cpp Wed Jul 05 20:25:13 2017 +0200 @@ -236,7 +236,7 @@ GangWorker::GangWorker(AbstractWorkGang* gang, uint id) { _gang = gang; set_id(id); - set_name("Gang worker#%d (%s)", id, gang->name()); + set_name("%s#%d", gang->name(), id); } void GangWorker::run() { diff -r 17efac395638 -r a46e5922bb40 hotspot/test/compiler/oracle/GetMethodOptionTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/oracle/GetMethodOptionTest.java Wed Jul 05 20:25:13 2017 +0200 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.reflect.Executable; +import java.util.function.BiFunction; + +import com.oracle.java.testlibrary.Asserts; +import sun.hotspot.WhiteBox; + +/* + * @test + * @bug 8074980 + * @library /testlibrary /../../test/lib + * @build sun.hotspot.WhiteBox com.oracle.java.testlibrary.Asserts GetMethodOptionTest + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:CompileCommand=option,GetMethodOptionTest::test,ccstrlist,MyListOption,_foo,_bar + * -XX:CompileCommand=option,GetMethodOptionTest::test,ccstr,MyStrOption,_foo + * -XX:CompileCommand=option,GetMethodOptionTest::test,bool,MyBoolOption,false + * -XX:CompileCommand=option,GetMethodOptionTest::test,intx,MyIntxOption,-1 + * -XX:CompileCommand=option,GetMethodOptionTest::test,uintx,MyUintxOption,1 + * -XX:CompileCommand=option,GetMethodOptionTest::test,MyFlag + * -XX:CompileCommand=option,GetMethodOptionTest::test,double,MyDoubleOption1,1.123 + * -XX:CompileCommand=option,GetMethodOptionTest.test,double,MyDoubleOption2,1.123 + * -XX:CompileCommand=option,GetMethodOptionTest::test,bool,MyBoolOptionX,false,intx,MyIntxOptionX,-1,uintx,MyUintxOptionX,1,MyFlagX,double,MyDoubleOptionX,1.123 + * GetMethodOptionTest + */ + +public class GetMethodOptionTest { + private static final WhiteBox WB = WhiteBox.getWhiteBox(); + public static void main(String[] args) { + Executable test = getMethod("test"); + Executable test2 = getMethod("test2"); + BiFunction getter = WB::getMethodOption; + for (TestCase testCase : TestCase.values()) { + Object expected = testCase.value; + String name = testCase.name(); + Asserts.assertEQ(expected, getter.apply(test, name), + testCase + ": universal getter returns wrong value"); + Asserts.assertEQ(expected, testCase.getter.apply(test, name), + testCase + ": specific getter returns wrong value"); + Asserts.assertEQ(null, getter.apply(test2, name), + testCase + ": universal getter returns value for unused method"); + Asserts.assertEQ(null, testCase.getter.apply(test2, name), + testCase + ": type specific getter returns value for unused method"); + } + } + private static void test() { } + private static void test2() { } + + private static enum TestCase { + MyListOption("_foo _bar", WB::getMethodStringOption), + MyStrOption("_foo", WB::getMethodStringOption), + MyBoolOption(false, WB::getMethodBooleanOption), + MyIntxOption(-1L, WB::getMethodIntxOption), + MyUintxOption(1L, WB::getMethodUintxOption), + MyFlag(true, WB::getMethodBooleanOption), + MyDoubleOption1(1.123d, WB::getMethodDoubleOption), + MyDoubleOption2(1.123d, WB::getMethodDoubleOption), + MyBoolOptionX(false, WB::getMethodBooleanOption), + MyIntxOptionX(-1L, WB::getMethodIntxOption), + MyUintxOptionX(1L, WB::getMethodUintxOption), + MyFlagX(true, WB::getMethodBooleanOption), + MyDoubleOptionX(1.123d, WB::getMethodDoubleOption); + + public final Object value; + public final BiFunction getter; + private TestCase(Object value, BiFunction getter) { + this.value = value; + this.getter = getter; + } + } + + private static Executable getMethod(String name) { + Executable result; + try { + result = GetMethodOptionTest.class.getDeclaredMethod(name); + } catch (NoSuchMethodException | SecurityException e) { + throw new Error("TESTBUG : can't get method " + name, e); + } + return result; + } +} diff -r 17efac395638 -r a46e5922bb40 hotspot/test/compiler/whitebox/DeoptimizeFramesTest.java --- a/hotspot/test/compiler/whitebox/DeoptimizeFramesTest.java Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/test/compiler/whitebox/DeoptimizeFramesTest.java Wed Jul 05 20:25:13 2017 +0200 @@ -31,11 +31,13 @@ * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions * -XX:+WhiteBoxAPI -Xmixed * -XX:CompileCommand=compileonly,DeoptimizeFramesTest$TestCaseImpl::method - * -XX:-DeoptimizeRandom -XX:-DeoptimizeALot DeoptimizeFramesTest true + * -XX:+IgnoreUnexpectedVMOptions -XX:-DeoptimizeRandom -XX:-DeoptimizeALot + * DeoptimizeFramesTest true * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions * -XX:+WhiteBoxAPI -Xmixed * -XX:CompileCommand=compileonly,DeoptimizeFramesTest$TestCaseImpl::method - * -XX:-DeoptimizeRandom -XX:-DeoptimizeALot DeoptimizeFramesTest false + * -XX:+IgnoreUnexpectedVMOptions -XX:-DeoptimizeRandom -XX:-DeoptimizeALot + * DeoptimizeFramesTest false * @summary testing of WB::deoptimizeFrames() */ import java.lang.reflect.Executable; diff -r 17efac395638 -r a46e5922bb40 hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java --- a/hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java Thu Mar 19 16:13:40 2015 -0700 +++ b/hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,7 +22,6 @@ */ /** - * @ignore 8019361 * @test TestDynShrinkHeap * @bug 8016479 * @requires vm.gc=="Parallel" | vm.gc=="null" @@ -35,7 +34,7 @@ import java.lang.management.MemoryUsage; import java.util.ArrayList; import sun.management.ManagementFactoryHelper; -import static com.oracle.java.testlibrary.Asserts.*; +import static com.oracle.java.testlibrary.Asserts.assertLessThan; public class TestDynShrinkHeap { diff -r 17efac395638 -r a46e5922bb40 jaxp/.hgtags --- a/jaxp/.hgtags Thu Mar 19 16:13:40 2015 -0700 +++ b/jaxp/.hgtags Wed Jul 05 20:25:13 2017 +0200 @@ -297,3 +297,4 @@ 57b26c883d54f45912bc3885ccad3c6b80960b1f jdk9-b52 d5b5a010a16688f188f5a9247ed873f5100b530c jdk9-b53 542c0c855ad467624cbedf11bff08e44b86b068d jdk9-b54 +2a460ce60ed47081f756f0cc0321d8e9ba7cac17 jdk9-b55 diff -r 17efac395638 -r a46e5922bb40 jaxws/.hgtags --- a/jaxws/.hgtags Thu Mar 19 16:13:40 2015 -0700 +++ b/jaxws/.hgtags Wed Jul 05 20:25:13 2017 +0200 @@ -300,3 +300,4 @@ 1d1e7704eca9c77ebe6a8705d17ac568801f7a3b jdk9-b52 b8fbe40efa97fe0753076ccc6dfc50747c7877d0 jdk9-b53 83a0cf0e08788c33872e1fe3e87bf9a0d1e59eaa jdk9-b54 +ca481b0492c82cc38fa0e6b746305ed88c26b4fd jdk9-b55 diff -r 17efac395638 -r a46e5922bb40 jdk/.hgtags --- a/jdk/.hgtags Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/.hgtags Wed Jul 05 20:25:13 2017 +0200 @@ -297,3 +297,4 @@ 607ea68032cd4a4cf2c7a7a41fcb39602d6a75e2 jdk9-b52 6cb5f5c34009630749a40cefe116d143f0b2583e jdk9-b53 568a62ab7d764d7c74ac1d87387dbe500662b551 jdk9-b54 +d49e247dade61f29f771f09b2105857492241156 jdk9-b55 diff -r 17efac395638 -r a46e5922bb40 jdk/make/data/currency/CurrencyData.properties --- a/jdk/make/data/currency/CurrencyData.properties Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/data/currency/CurrencyData.properties Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -23,7 +23,10 @@ # questions. # -formatVersion=1 +# Version of the currency data format. 
+# 1: initial +# 2: Change in minor unit (allowing 4-9 digits) +formatVersion=2 # Version of the currency code information in this class. # It is a serial number that accompanies with each amendment. @@ -36,7 +39,7 @@ all=ADP020-AED784-AFA004-AFN971-ALL008-AMD051-ANG532-AOA973-ARS032-ATS040-AUD036-\ AWG533-AYM945-AZM031-AZN944-BAM977-BBD052-BDT050-BEF056-BGL100-BGN975-BHD048-BIF108-\ BMD060-BND096-BOB068-BOV984-BRL986-BSD044-BTN064-BWP072-BYB112-BYR974-\ - BZD084-CAD124-CDF976-CHF756-CLF990-CLP152-CNY156-COP170-CRC188-CSD891-CUP192-CUC931-\ + BZD084-CAD124-CDF976-CHE947-CHF756-CHW948-CLF990-CLP152-CNY156-COP170-COU970-CRC188-CSD891-CUP192-CUC931-\ CVE132-CYP196-CZK203-DEM276-DJF262-DKK208-DOP214-DZD012-EEK233-EGP818-\ ERN232-ESP724-ETB230-EUR978-FIM246-FJD242-FKP238-FRF250-GBP826-GEL981-\ GHC288-GHS936-GIP292-GMD270-GNF324-GRD300-GTQ320-GWP624-GYD328-HKD344-HNL340-\ @@ -49,7 +52,7 @@ PKR586-PLN985-PTE620-PYG600-QAR634-ROL946-RON946-RSD941-RUB643-RUR810-RWF646-SAR682-\ SBD090-SCR690-SDD736-SDG938-SEK752-SGD702-SHP654-SIT705-SKK703-SLL694-SOS706-\ SRD968-SRG740-SSP728-STD678-SVC222-SYP760-SZL748-THB764-TJS972-TMM795-TMT934-TND788-TOP776-\ - TPE626-TRL792-TRY949-TTD780-TWD901-TZS834-UAH980-UGX800-USD840-USN997-USS998-\ + TPE626-TRL792-TRY949-TTD780-TWD901-TZS834-UAH980-UGX800-USD840-USN997-USS998-UYI940-\ UYU858-UZS860-VEB862-VEF937-VND704-VUV548-WST882-XAF950-XAG961-XAU959-XBA955-\ XBB956-XBC957-XBD958-XCD951-XDR960-XFO000-XFU000-XOF952-XPD964-XPF953-\ XPT962-XSU994-XTS963-XUA965-XXX999-YER886-YUM891-ZAR710-ZMK894-ZMW967-ZWD716-ZWL932-\ @@ -579,16 +582,17 @@ ZW=ZWL -# List of currencies with 0, 1, OR 3 decimals for minor units, or where there -# are no minor units defined. All others use 2 decimals. +# List of currencies with non-2digit decimals for minor units, +# or where there are no minor units defined. All others use 2 decimals. minor0=\ - ADP-BEF-BIF-BYB-BYR-CLF-CLP-DJF-ESP-GNF-\ + ADP-BEF-BIF-BYB-BYR-CLP-DJF-ESP-GNF-\ GRD-ISK-ITL-JPY-KMF-KRW-LUF-MGF-PYG-PTE-RWF-\ - TPE-TRL-UGX-VND-VUV-XAF-XOF-XPF -minor1= + TPE-TRL-UGX-UYI-VND-VUV-XAF-XOF-XPF minor3=\ BHD-IQD-JOD-KWD-LYD-OMR-TND +minor4=\ + CLF minorUndefined=\ XAG-XAU-XBA-XBB-XBC-XBD-XDR-XFO-XFU-XPD-\ XPT-XSU-XTS-XUA-XXX diff -r 17efac395638 -r a46e5922bb40 jdk/make/gensrc/Gensrc-jdk.jconsole.gmk --- a/jdk/make/gensrc/Gensrc-jdk.jconsole.gmk Thu Mar 19 16:13:40 2015 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,45 +0,0 @@ -# -# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. Oracle designates this -# particular file as subject to the "Classpath" exception as provided -# by Oracle in the LICENSE file that accompanied this code. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
-# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# - -include GensrcCommon.gmk - -########################################################################################## -# Version file for jconsole - -$(SUPPORT_OUTPUTDIR)/gensrc/jdk.jconsole/sun/tools/jconsole/Version.java: \ - $(JDK_TOPDIR)/src/jdk.jconsole/share/classes/sun/tools/jconsole/Version.java.template - $(MKDIR) -p $(@D) - $(RM) $@ $@.tmp - $(ECHO) $(LOG_INFO) Generating sun/tools/jconsole/Version.java - $(SED) -e 's/@@jconsole_version@@/$(FULL_VERSION)/g' $< > $@.tmp - $(MV) $@.tmp $@ - -GENSRC_JDK_JCONSOLE += $(SUPPORT_OUTPUTDIR)/gensrc/jdk.jconsole/sun/tools/jconsole/Version.java - -jdk.jconsole: $(GENSRC_JDK_JCONSOLE) - -all: jdk.jconsole - -.PHONY: all jdk.jconsole diff -r 17efac395638 -r a46e5922bb40 jdk/make/lib/Lib-java.security.jgss.gmk --- a/jdk/make/lib/Lib-java.security.jgss.gmk Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/lib/Lib-java.security.jgss.gmk Wed Jul 05 20:25:13 2017 +0200 @@ -41,7 +41,6 @@ CFLAGS := $(CFLAGS_JDKLIB) $(addprefix -I, $(LIBJ2GSS_SRC)) \ $(LIBJAVA_HEADER_FLAGS) \ -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \ - DISABLED_WARNINGS_gcc := pointer-to-int-cast, \ MAPFILE := $(JDK_TOPDIR)/make/mapfiles/libj2gss/mapfile-vers, \ LDFLAGS := $(LDFLAGS_JDKLIB) \ $(call SET_SHARED_LIBRARY_ORIGIN), \ @@ -74,6 +73,8 @@ endif ifneq ($(BUILD_LIBKRB5_NAME), ) + # libosxkrb5 needs to call deprecated krb5 APIs so that java + # can use the native credentials cache. $(eval $(call SetupNativeCompilation,BUILD_LIBKRB5, \ LIBRARY := $(BUILD_LIBKRB5_NAME), \ OUTPUT_DIR := $(INSTALL_LIBRARIES_HERE), \ @@ -83,7 +84,7 @@ CFLAGS := $(CFLAGS_JDKLIB) \ $(addprefix -I, $(BUILD_LIBKRB5_SRC)) \ -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \ - DISABLED_WARNINGS_clang := implicit-function-declaration, \ + DISABLED_WARNINGS_clang := deprecated-declarations, \ LDFLAGS := $(LDFLAGS_JDKLIB) \ $(call SET_SHARED_LIBRARY_ORIGIN), \ LDFLAGS_SUFFIX := $(BUILD_LIBKRB5_LIBS), \ diff -r 17efac395638 -r a46e5922bb40 jdk/make/lib/Lib-jdk.pack200.gmk --- a/jdk/make/lib/Lib-jdk.pack200.gmk Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/lib/Lib-jdk.pack200.gmk Wed Jul 05 20:25:13 2017 +0200 @@ -42,6 +42,7 @@ CFLAGS_release := -DPRODUCT, \ DISABLED_WARNINGS_gcc := conversion-null sign-compare format-security \ format-nonliteral parentheses, \ + DISABLED_WARNINGS_clang := bool-conversion format-security, \ DISABLED_WARNINGS_solstudio := truncwarn, \ DISABLED_WARNINGS_microsoft := 4267 4018, \ MAPFILE := $(JDK_TOPDIR)/make/mapfiles/libunpack/mapfile-vers, \ diff -r 17efac395638 -r a46e5922bb40 jdk/make/mapfiles/libawt/mapfile-mawt-vers --- a/jdk/make/mapfiles/libawt/mapfile-mawt-vers Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/mapfiles/libawt/mapfile-mawt-vers Wed Jul 05 20:25:13 2017 +0200 @@ -243,7 +243,7 @@ getDefaultConfig; Java_sun_font_FontConfigManager_getFontConfig; Java_sun_font_FontConfigManager_getFontConfigAASettings; - Java_sun_awt_X11FontManager_getFontPathNative; + Java_sun_awt_FcFontManager_getFontPathNative; Java_sun_font_SunFontManager_populateFontFileNameMap; # CDE private entry point diff -r 17efac395638 -r a46e5922bb40 jdk/make/mapfiles/libawt/mapfile-vers-linux --- a/jdk/make/mapfiles/libawt/mapfile-vers-linux Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/mapfiles/libawt/mapfile-vers-linux Wed Jul 05 20:25:13 2017 +0200 @@ -270,7 +270,7 @@ getDefaultConfig; 
Java_sun_font_FontConfigManager_getFontConfig; Java_sun_font_FontConfigManager_getFontConfigAASettings; - Java_sun_awt_X11FontManager_getFontPathNative; + Java_sun_awt_FcFontManager_getFontPathNative; Java_sun_font_SunFontManager_populateFontFileNameMap; # CDE private entry point diff -r 17efac395638 -r a46e5922bb40 jdk/make/mapfiles/libawt_headless/mapfile-vers --- a/jdk/make/mapfiles/libawt_headless/mapfile-vers Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/mapfiles/libawt_headless/mapfile-vers Wed Jul 05 20:25:13 2017 +0200 @@ -65,7 +65,7 @@ Java_sun_font_FontConfigManager_getFontConfig; Java_sun_font_FontConfigManager_getFontConfigAASettings; Java_sun_font_FontConfigManager_getFontConfigVersion; - Java_sun_awt_X11FontManager_getFontPathNative; + Java_sun_awt_FcFontManager_getFontPathNative; Java_sun_awt_FontDescriptor_initIDs; Java_sun_awt_PlatformFont_initIDs; diff -r 17efac395638 -r a46e5922bb40 jdk/make/mapfiles/libawt_xawt/mapfile-vers --- a/jdk/make/mapfiles/libawt_xawt/mapfile-vers Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/mapfiles/libawt_xawt/mapfile-vers Wed Jul 05 20:25:13 2017 +0200 @@ -188,7 +188,7 @@ Java_sun_font_FontConfigManager_getFontConfig; Java_sun_font_FontConfigManager_getFontConfigAASettings; Java_sun_font_FontConfigManager_getFontConfigVersion; - Java_sun_awt_X11FontManager_getFontPathNative; + Java_sun_awt_FcFontManager_getFontPathNative; Java_sun_awt_X11GraphicsEnvironment_initDisplay; Java_sun_awt_X11GraphicsEnvironment_initGLX; Java_sun_awt_X11GraphicsEnvironment_initXRender; diff -r 17efac395638 -r a46e5922bb40 jdk/make/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java --- a/jdk/make/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/make/src/classes/build/tools/generatecurrencydata/GenerateCurrencyData.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ import java.util.Date; import java.util.HashMap; import java.util.Locale; +import java.util.Objects; import java.util.Properties; import java.util.TimeZone; @@ -72,10 +73,6 @@ private static String formatVersion; private static String dataVersion; private static String validCurrencyCodes; - private static String currenciesWith0MinorUnitDecimals; - private static String currenciesWith1MinorUnitDecimal; - private static String currenciesWith3MinorUnitDecimal; - private static String currenciesWithMinorUnitsUndefined; // handy constants - must match definitions in java.util.Currency // magic number @@ -83,29 +80,31 @@ // number of characters from A to Z private static final int A_TO_Z = ('Z' - 'A') + 1; // entry for invalid country codes - private static final int INVALID_COUNTRY_ENTRY = 0x007F; + private static final int INVALID_COUNTRY_ENTRY = 0x0000007F; // entry for countries without currency - private static final int COUNTRY_WITHOUT_CURRENCY_ENTRY = 0x0080; + private static final int COUNTRY_WITHOUT_CURRENCY_ENTRY = 0x00000200; // mask for simple case country entries - private static final int SIMPLE_CASE_COUNTRY_MASK = 0x0000; + private static final int SIMPLE_CASE_COUNTRY_MASK = 0x00000000; // mask for simple case country entry final character - private static final int SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK = 0x001F; + private static final int SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK = 0x0000001F; // mask for simple case country entry default currency digits - private static final int SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_MASK = 0x0060; + private static final int SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_MASK = 0x000001E0; // shift count for simple case country entry default currency digits private static final int SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_SHIFT = 5; + // maximum number for simple case country entry default currency digits + private static final int SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS = 9; // mask for special case country entries - private static final int SPECIAL_CASE_COUNTRY_MASK = 0x0080; + private static final int SPECIAL_CASE_COUNTRY_MASK = 0x00000200; // mask for special case country index - private static final int SPECIAL_CASE_COUNTRY_INDEX_MASK = 0x001F; + private static final int SPECIAL_CASE_COUNTRY_INDEX_MASK = 0x0000001F; // delta from entry index component in main table to index into special case tables private static final int SPECIAL_CASE_COUNTRY_INDEX_DELTA = 1; // mask for distinguishing simple and special case countries private static final int COUNTRY_TYPE_MASK = SIMPLE_CASE_COUNTRY_MASK | SPECIAL_CASE_COUNTRY_MASK; // mask for the numeric code of the currency - private static final int NUMERIC_CODE_MASK = 0x0003FF00; + private static final int NUMERIC_CODE_MASK = 0x000FFC00; // shift count for the numeric code of the currency - private static final int NUMERIC_CODE_SHIFT = 8; + private static final int NUMERIC_CODE_SHIFT = 10; // generated data private static int[] mainTable = new int[A_TO_Z * A_TO_Z]; @@ -120,7 +119,7 @@ private static int[] specialCaseOldCurrenciesNumericCode = new int[maxSpecialCases]; private static int[] specialCaseNewCurrenciesNumericCode = new int[maxSpecialCases]; - private static final int maxOtherCurrencies = 70; + private static final int maxOtherCurrencies = 128; private static int otherCurrenciesCount = 0; private static StringBuffer otherCurrencies = new StringBuffer(); private static int[] otherCurrenciesDefaultFractionDigits = new 
int[maxOtherCurrencies]; @@ -129,6 +128,11 @@ // date format for parsing cut-over times private static SimpleDateFormat format; + // Minor Units + private static String[] currenciesWithDefinedMinorUnitDecimals = + new String[SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS + 1]; + private static String currenciesWithMinorUnitsUndefined; + public static void main(String[] args) { // Look for "-o outputfilename" option @@ -171,16 +175,14 @@ formatVersion = (String) currencyData.get("formatVersion"); dataVersion = (String) currencyData.get("dataVersion"); validCurrencyCodes = (String) currencyData.get("all"); - currenciesWith0MinorUnitDecimals = (String) currencyData.get("minor0"); - currenciesWith1MinorUnitDecimal = (String) currencyData.get("minor1"); - currenciesWith3MinorUnitDecimal = (String) currencyData.get("minor3"); + for (int i = 0; i <= SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS; i++) { + currenciesWithDefinedMinorUnitDecimals[i] + = (String) currencyData.get("minor"+i); + } currenciesWithMinorUnitsUndefined = (String) currencyData.get("minorUndefined"); if (formatVersion == null || dataVersion == null || validCurrencyCodes == null || - currenciesWith0MinorUnitDecimals == null || - currenciesWith1MinorUnitDecimal == null || - currenciesWith3MinorUnitDecimal == null || currenciesWithMinorUnitsUndefined == null) { throw new NullPointerException("not all required data is defined in input"); } @@ -207,7 +209,7 @@ if (currencyInfo.charAt(0) == firstChar && currencyInfo.charAt(1) == secondChar) { checkCurrencyCode(currencyInfo); int digits = getDefaultFractionDigits(currencyInfo); - if (digits < 0 || digits > 3) { + if (digits < 0 || digits > SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS) { throw new RuntimeException("fraction digits out of range for " + currencyInfo); } int numericCode= getNumericCode(currencyInfo); @@ -231,13 +233,14 @@ } private static int getDefaultFractionDigits(String currencyCode) { - if (currenciesWith0MinorUnitDecimals.indexOf(currencyCode) != -1) { - return 0; - } else if (currenciesWith1MinorUnitDecimal.indexOf(currencyCode) != -1) { - return 1; - } else if (currenciesWith3MinorUnitDecimal.indexOf(currencyCode) != -1) { - return 3; - } else if (currenciesWithMinorUnitsUndefined.indexOf(currencyCode) != -1) { + for (int i = 0; i <= SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS; i++) { + if (Objects.nonNull(currenciesWithDefinedMinorUnitDecimals[i]) && + currenciesWithDefinedMinorUnitDecimals[i].indexOf(currencyCode) != -1) { + return i; + } + } + + if (currenciesWithMinorUnitsUndefined.indexOf(currencyCode) != -1) { return -1; } else { return 2; diff -r 17efac395638 -r a46e5922bb40 jdk/src/demo/share/java2d/J2DBench/src/j2dbench/J2DBench.java --- a/jdk/src/demo/share/java2d/J2DBench/src/j2dbench/J2DBench.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/demo/share/java2d/J2DBench/src/j2dbench/J2DBench.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -61,6 +61,7 @@ import javax.swing.JFileChooser; import javax.swing.JOptionPane; import javax.swing.SwingUtilities; +import javax.swing.WindowConstants; import java.text.SimpleDateFormat; import java.util.Date; @@ -698,7 +699,7 @@ } }; guiFrame = f; - f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + f.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); f.getContentPane().setLayout(new BorderLayout()); f.getContentPane().add(Group.root.getJComponent(), BorderLayout.CENTER); JPanel p = new JPanel(); diff -r 17efac395638 -r a46e5922bb40 jdk/src/demo/share/jfc/SampleTree/SampleTree.java --- a/jdk/src/demo/share/jfc/SampleTree/SampleTree.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/demo/share/jfc/SampleTree/SampleTree.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -135,7 +135,7 @@ panel.add("Center", sp); panel.add("South", constructOptionsPanel()); - frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); frame.pack(); frame.setVisible(true); } diff -r 17efac395638 -r a46e5922bb40 jdk/src/demo/share/management/JTop/JTop.java --- a/jdk/src/demo/share/management/JTop/JTop.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/demo/share/management/JTop/JTop.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -410,7 +410,7 @@ private static void createAndShowGUI(JPanel jtop) { // Create and set up the window. JFrame frame = new JFrame("JTop"); - frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); // Create and set up the content pane. JComponent contentPane = (JComponent) frame.getContentPane(); diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/io/RandomAccessFile.java --- a/jdk/src/java.base/share/classes/java/io/RandomAccessFile.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/io/RandomAccessFile.java Wed Jul 05 20:25:13 2017 +0200 @@ -276,7 +276,7 @@ * @since 1.4 * @spec JSR-51 */ - public FileChannel getChannel() { + public final FileChannel getChannel() { FileChannel fc = this.channel; if (fc == null) { synchronized (this) { diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/AccessControlContext.java --- a/jdk/src/java.base/share/classes/java/security/AccessControlContext.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/AccessControlContext.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -153,8 +153,6 @@ * {@code DomainCombiner} with the provided * {@code AccessControlContext}. * - *

- * * @param acc the {@code AccessControlContext} associated * with the provided {@code DomainCombiner}. * @@ -338,8 +336,6 @@ * Get the {@code DomainCombiner} associated with this * {@code AccessControlContext}. * - *

- * * @return the {@code DomainCombiner} associated with this * {@code AccessControlContext}, or {@code null} * if there is none. @@ -738,12 +734,12 @@ /** * Checks two AccessControlContext objects for equality. - * Checks that obj is + * Checks that {@code obj} is * an AccessControlContext and has the same set of ProtectionDomains * as this context. - *

+ * * @param obj the object we are testing for equality with this object. - * @return true if obj is an AccessControlContext, and has the + * @return true if {@code obj} is an AccessControlContext, and has the * same set of ProtectionDomains as this context, false otherwise. */ public boolean equals(Object obj) { diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/AllPermission.java --- a/jdk/src/java.base/share/classes/java/security/AllPermission.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/AllPermission.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,7 +96,7 @@ * objects are always equal. * * @param obj the object we are testing for equality with this object. - * @return true if obj is an AllPermission, false otherwise. + * @return true if {@code obj} is an AllPermission, false otherwise. */ public boolean equals(Object obj) { return (obj instanceof AllPermission); @@ -124,7 +124,6 @@ /** * Returns a new PermissionCollection object for storing AllPermission * objects. - *

* * @return a new PermissionCollection object suitable for * storing AllPermissions. diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/BasicPermission.java --- a/jdk/src/java.base/share/classes/java/security/BasicPermission.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/BasicPermission.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,7 @@ * named permission or you don't.) * Subclasses may implement actions on top of BasicPermission, * if desired. - *

+ * * @see java.security.Permission * @see java.security.Permissions * @see java.security.PermissionCollection @@ -154,8 +154,8 @@ *

 * More specifically, this method returns true if:
 * <ul>
- * <li> p's class is the same as this object's class, and
- * <li> p's name equals or (in the case of wildcards)
+ * <li> {@code p}'s class is the same as this object's class, and
+ * <li> {@code p}'s name equals or (in the case of wildcards)
 *      is implied by this object's
 *      name. For example, "a.b.*" implies "a.b.c".
 * </ul>
@@ -193,11 +193,11 @@ /** * Checks two BasicPermission objects for equality. - * Checks that obj's class is the same as this object's class + * Checks that {@code obj}'s class is the same as this object's class * and has the same name as this object. - *

+ * * @param obj the object we are testing for equality with this object. - * @return true if obj's class is the same as this object's class + * @return true if {@code obj}'s class is the same as this object's class * and has the same name as this BasicPermission object, false otherwise. */ public boolean equals(Object obj) { diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/DomainCombiner.java --- a/jdk/src/java.base/share/classes/java/security/DomainCombiner.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/DomainCombiner.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -87,8 +87,6 @@ * Individual ProtectionDomains may be modified (with a new * set of Permissions, for example). * - *

- * * @param currentDomains the ProtectionDomains associated with the * current execution Thread, up to the most recent * privileged {@code ProtectionDomain}. @@ -96,7 +94,7 @@ * with the most recently executing {@code ProtectionDomain} * residing at the beginning of the array. This parameter may * be {@code null} if the current execution Thread - * has no associated ProtectionDomains.

+ * has no associated ProtectionDomains. * * @param assignedDomains an array of inherited ProtectionDomains. * ProtectionDomains may be inherited from a parent Thread, diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/KeyRep.java --- a/jdk/src/java.base/share/classes/java/security/KeyRep.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/KeyRep.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -112,8 +112,6 @@ /** * Construct the alternate Key class. * - *

- * * @param type either one of Type.SECRET, Type.PUBLIC, or Type.PRIVATE * @param algorithm the algorithm returned from * {@code Key.getAlgorithm()} @@ -157,8 +155,6 @@ * encoded key bytes, and generates a private key from the spec * * - *

- * * @return the resolved Key object * * @exception ObjectStreamException if the Type/format diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/KeyStore.java --- a/jdk/src/java.base/share/classes/java/security/KeyStore.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/KeyStore.java Wed Jul 05 20:25:13 2017 +0200 @@ -120,7 +120,7 @@ * KeyStore ks = KeyStore.getInstance("JKS"); * * The system will return the most preferred implementation of the - * specified keystore type available in the environment.

+ * specified keystore type available in the environment. * * *

Before a keystore can be accessed, it must be @@ -617,7 +617,6 @@ /** * Retrieves the attributes associated with an entry. - *

* * @return an unmodifiable {@code Set} of attributes, possibly empty * @@ -708,7 +707,6 @@ /** * Retrieves the attributes associated with an entry. - *

* * @return an unmodifiable {@code Set} of attributes, possibly empty * @@ -792,7 +790,6 @@ /** * Retrieves the attributes associated with an entry. - *

* * @return an unmodifiable {@code Set} of attributes, possibly empty * diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/KeyStoreSpi.java --- a/jdk/src/java.base/share/classes/java/security/KeyStoreSpi.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/KeyStoreSpi.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -595,7 +595,6 @@ * Probes the specified input stream to determine whether it contains a * keystore that is supported by this implementation, or not. * - *

* @implSpec * This method returns false by default. Keystore implementations should * override this method to peek at the data stream directly or to use other diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/Permissions.java --- a/jdk/src/java.base/share/classes/java/security/Permissions.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/Permissions.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,7 +111,7 @@ * * This method creates * a new PermissionCollection object (and adds the permission to it) - * if an appropriate collection does not yet exist.

+ * if an appropriate collection does not yet exist. * * @param permission the Permission object to add. * @@ -162,7 +162,7 @@ * *

Additionally, if this PermissionCollection contains the * AllPermission, this method will always return true. - *

+ * * @param permission the Permission object to check. * * @return true if "permission" is implied by the permissions in the diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/ProtectionDomain.java --- a/jdk/src/java.base/share/classes/java/security/ProtectionDomain.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/ProtectionDomain.java Wed Jul 05 20:25:13 2017 +0200 @@ -51,7 +51,6 @@ * ProtectionDomain can also be constructed such that it is dynamically * mapped to a set of permissions by the current Policy whenever a permission * is checked. - *

* * @author Li Gong * @author Roland Schemers @@ -168,7 +167,6 @@ * this domain. This constructor affords the * Policy provider the opportunity to augment the supplied * PermissionCollection to reflect policy changes. - *

* * @param codesource the CodeSource associated with this domain * @param permissions the permissions granted to this domain @@ -263,7 +261,6 @@ * permissions, then the permission will be checked against the * combination of the PermissionCollection supplied at construction and * the current Policy binding. - *

* * @param permission the Permission object to check. * diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/SecureClassLoader.java --- a/jdk/src/java.base/share/classes/java/security/SecureClassLoader.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/SecureClassLoader.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ *

If there is a security manager, this method first * calls the security manager's {@code checkCreateClassLoader} * method to ensure creation of a class loader is allowed. - *

+ * * @param parent the parent ClassLoader * @exception SecurityException if a security manager exists and its * {@code checkCreateClassLoader} method doesn't allow @@ -112,7 +112,7 @@ *

* If a non-null CodeSource is supplied a ProtectionDomain is * constructed and associated with the class being defined. - *

+ * * @param name the expected name of the class, or {@code null} * if not known, using '.' and not '/' as the separator * and without a trailing ".class" suffix. @@ -149,7 +149,7 @@ *

* If a non-null CodeSource is supplied a ProtectionDomain is * constructed and associated with the class being defined. - *

+ * * @param name the expected name of the class, or {@code null} * if not known, using '.' and not '/' as the separator * and without a trailing ".class" suffix. @@ -180,7 +180,7 @@ * This method is invoked by the defineClass method which takes * a CodeSource as an argument when it is constructing the * ProtectionDomain for the class being defined. - *

+ * * @param codesource the codesource. * * @return the permissions granted to the codesource. diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/UnresolvedPermission.java --- a/jdk/src/java.base/share/classes/java/security/UnresolvedPermission.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/UnresolvedPermission.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -310,7 +310,7 @@ /** * Checks two UnresolvedPermission objects for equality. - * Checks that obj is an UnresolvedPermission, and has + * Checks that {@code obj} is an UnresolvedPermission, and has * the same type (class) name, permission name, actions, and * certificates as this object. * @@ -491,7 +491,7 @@ /** * Returns a new PermissionCollection object for storing * UnresolvedPermission objects. - *

+ * * @return a new PermissionCollection object suitable for * storing UnresolvedPermissions. */ diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/security/cert/Certificate.java --- a/jdk/src/java.base/share/classes/java/security/cert/Certificate.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/security/cert/Certificate.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ * An identity certificate is a binding of a principal to a public key which * is vouched for by another principal. (A principal represents * an entity such as an individual user, a group, or a corporation.) - *

+ *

* This class is an abstraction for certificates that have different * formats but important common uses. For example, different types of * certificates, such as X.509 and PGP, share general certificate @@ -248,9 +248,7 @@ * Construct the alternate Certificate class with the Certificate * type and Certificate encoding bytes. * - *

- * - * @param type the standard name of the Certificate type.

+ * @param type the standard name of the Certificate type. * * @param data the Certificate data. */ @@ -262,8 +260,6 @@ /** * Resolve the Certificate Object. * - *

- * * @return the resolved Certificate Object * * @throws java.io.ObjectStreamException if the Certificate diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/Currency.java --- a/jdk/src/java.base/share/classes/java/util/Currency.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/Currency.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -140,11 +140,11 @@ // - maps country code to 32-bit int // - 26*26 entries, corresponding to [A-Z]*[A-Z] // - \u007F -> not valid country - // - bits 18-31: unused - // - bits 8-17: numeric code (0 to 1023) - // - bit 7: 1 - special case, bits 0-4 indicate which one + // - bits 20-31: unused + // - bits 10-19: numeric code (0 to 1023) + // - bit 9: 1 - special case, bits 0-4 indicate which one // 0 - simple country, bits 0-4 indicate final char of currency code - // - bits 5-6: fraction digits for simple countries, 0 for special cases + // - bits 5-8: fraction digits for simple countries, 0 for special cases // - bits 0-4: final char for currency code for simple country, or ID of special case // - special case IDs: // - 0: country has no currency @@ -182,32 +182,34 @@ // number of characters from A to Z private static final int A_TO_Z = ('Z' - 'A') + 1; // entry for invalid country codes - private static final int INVALID_COUNTRY_ENTRY = 0x007F; + private static final int INVALID_COUNTRY_ENTRY = 0x0000007F; // entry for countries without currency - private static final int COUNTRY_WITHOUT_CURRENCY_ENTRY = 0x0080; + private static final int COUNTRY_WITHOUT_CURRENCY_ENTRY = 0x00000200; // mask for simple case country entries - private static final int SIMPLE_CASE_COUNTRY_MASK = 0x0000; + private static final int SIMPLE_CASE_COUNTRY_MASK = 0x00000000; // mask for simple case country entry final character - private static final int SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK = 0x001F; + private static final int SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK = 0x0000001F; // mask for simple case country entry default currency digits - private static final int SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_MASK = 0x0060; + private static final int SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_MASK = 0x000001E0; // shift count for simple case country entry default currency digits private static final int SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_SHIFT = 5; + // maximum number for simple case country entry default currency digits + private static final int SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS = 9; // mask for special case country entries - private static final int SPECIAL_CASE_COUNTRY_MASK = 0x0080; + private static final int SPECIAL_CASE_COUNTRY_MASK = 0x00000200; // mask for special case country index - private static final int SPECIAL_CASE_COUNTRY_INDEX_MASK = 0x001F; + private static final int SPECIAL_CASE_COUNTRY_INDEX_MASK = 0x0000001F; // delta from entry index component in main table to index into special case tables private static final int SPECIAL_CASE_COUNTRY_INDEX_DELTA = 1; // mask for distinguishing simple and special case countries private static final int COUNTRY_TYPE_MASK = SIMPLE_CASE_COUNTRY_MASK | SPECIAL_CASE_COUNTRY_MASK; // mask for the numeric code of the currency - private static final int NUMERIC_CODE_MASK = 0x0003FF00; + 
private static final int NUMERIC_CODE_MASK = 0x000FFC00; // shift count for the numeric code of the currency - private static final int NUMERIC_CODE_SHIFT = 8; + private static final int NUMERIC_CODE_SHIFT = 10; // Currency data format version - private static final int VALID_FORMAT_VERSION = 1; + private static final int VALID_FORMAT_VERSION = 2; static { AccessController.doPrivileged(new PrivilegedAction() { @@ -261,7 +263,7 @@ Set keys = props.stringPropertyNames(); Pattern propertiesPattern = Pattern.compile("([A-Z]{3})\\s*,\\s*(\\d{3})\\s*,\\s*" + - "([0-3])\\s*,?\\s*(\\d{4}-\\d{2}-\\d{2}T\\d{2}:" + + "(\\d+)\\s*,?\\s*(\\d{4}-\\d{2}-\\d{2}T\\d{2}:" + "\\d{2}:\\d{2})?"); for (String key : keys) { replaceCurrencyData(propertiesPattern, @@ -682,7 +684,7 @@ * @param ctry country code * @param curdata currency data. This is a comma separated string that * consists of "three-letter alphabet code", "three-digit numeric code", - * and "one-digit (0,1,2, or 3) default fraction digit". + * and "one-digit (0-9) default fraction digit". * For example, "JPZ,392,0". * An optional UTC date can be appended to the string (comma separated) * to allow a currency change take effect after date specified. @@ -721,8 +723,14 @@ String code = m.group(1); int numeric = Integer.parseInt(m.group(2)); + int entry = numeric << NUMERIC_CODE_SHIFT; int fraction = Integer.parseInt(m.group(3)); - int entry = numeric << NUMERIC_CODE_SHIFT; + if (fraction > SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS) { + info("currency.properties entry for " + ctry + + " ignored since the fraction is more than " + + SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS + ":" + curdata, null); + return; + } int index; for (index = 0; index < scOldCurrencies.length; index++) { diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/Properties.java --- a/jdk/src/java.base/share/classes/java/util/Properties.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/Properties.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,8 +34,6 @@ import java.io.Writer; import java.io.OutputStreamWriter; import java.io.BufferedWriter; -import java.security.AccessController; -import java.security.PrivilegedAction; import jdk.internal.util.xml.PropertiesDefaultHandler; @@ -311,9 +309,11 @@ * input stream. * @throws IllegalArgumentException if a malformed Unicode escape * appears in the input. + * @throws NullPointerException if {@code reader} is null. * @since 1.6 */ public synchronized void load(Reader reader) throws IOException { + Objects.requireNonNull(reader, "reader parameter is null"); load0(new LineReader(reader)); } @@ -335,9 +335,11 @@ * input stream. * @throws IllegalArgumentException if the input stream contains a * malformed Unicode escape sequence. + * @throws NullPointerException if {@code inStream} is null. 
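The Currency.java hunk above widens each country entry in the main lookup table: the ISO 4217 numeric code now sits in bits 10-19, the default fraction digits (now 0-9) in bits 5-8, and the special-case flag in bit 9. A minimal sketch of packing and unpacking an entry under that layout; the class name CurrencyEntryLayout and the locally restated constants are illustrative only, not the JDK implementation:

    // Sketch of the widened entry layout described in the Currency.java comment above.
    public class CurrencyEntryLayout {
        static final int FINAL_CHAR_MASK    = 0x0000001F; // bits 0-4: final char of currency code
        static final int DIGITS_MASK        = 0x000001E0; // bits 5-8: default fraction digits (0-9)
        static final int DIGITS_SHIFT       = 5;
        static final int NUMERIC_CODE_MASK  = 0x000FFC00; // bits 10-19: ISO 4217 numeric code (0-1023)
        static final int NUMERIC_CODE_SHIFT = 10;

        static int pack(char finalChar, int fractionDigits, int numericCode) {
            return (numericCode << NUMERIC_CODE_SHIFT)
                    | (fractionDigits << DIGITS_SHIFT)
                    | (finalChar - 'A');
        }

        public static void main(String[] args) {
            // Japan: currency code ends in 'Y' (JPY), 0 fraction digits, numeric code 392.
            int entry = pack('Y', 0, 392);
            System.out.println((entry & NUMERIC_CODE_MASK) >> NUMERIC_CODE_SHIFT); // 392
            System.out.println((entry & DIGITS_MASK) >> DIGITS_SHIFT);             // 0
            System.out.println((char) ('A' + (entry & FINAL_CHAR_MASK)));          // Y
        }
    }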
* @since 1.2 */ public synchronized void load(InputStream inStream) throws IOException { + Objects.requireNonNull(inStream, "inStream parameter is null"); load0(new LineReader(inStream)); } diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/regex/Matcher.java --- a/jdk/src/java.base/share/classes/java/util/regex/Matcher.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/regex/Matcher.java Wed Jul 05 20:25:13 2017 +0200 @@ -292,11 +292,13 @@ @Override public int start() { + checkMatch(); return first; } @Override public int start(int group) { + checkMatch(); if (group < 0 || group > groupCount) throw new IndexOutOfBoundsException("No group " + group); return groups[group * 2]; @@ -304,11 +306,13 @@ @Override public int end() { + checkMatch(); return last; } @Override public int end(int group) { + checkMatch(); if (group < 0 || group > groupCount) throw new IndexOutOfBoundsException("No group " + group); return groups[group * 2 + 1]; @@ -321,17 +325,25 @@ @Override public String group() { + checkMatch(); return group(0); } @Override public String group(int group) { + checkMatch(); if (group < 0 || group > groupCount) throw new IndexOutOfBoundsException("No group " + group); if ((groups[group*2] == -1) || (groups[group*2+1] == -1)) return null; return text.subSequence(groups[group * 2], groups[group * 2 + 1]).toString(); } + + private void checkMatch() { + if (first < 0) + throw new IllegalStateException("No match found"); + + } } /** diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/DoublePipeline.java --- a/jdk/src/java.base/share/classes/java/util/stream/DoublePipeline.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/DoublePipeline.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -447,7 +447,7 @@ @Override public final long count() { - return mapToLong(e -> 1L).sum(); + return evaluate(ReduceOps.makeDoubleCounting()); } @Override diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/DoubleStream.java --- a/jdk/src/java.base/share/classes/java/util/stream/DoubleStream.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/DoubleStream.java Wed Jul 05 20:25:13 2017 +0200 @@ -581,6 +581,24 @@ * *
      * <p>This is a terminal operation.
      *
+     * @apiNote
+     * An implementation may choose to not execute the stream pipeline (either
+     * sequentially or in parallel) if it is capable of computing the count
+     * directly from the stream source. In such cases no source elements will
+     * be traversed and no intermediate operations will be evaluated.
+     * Behavioral parameters with side-effects, which are strongly discouraged
+     * except for harmless cases such as debugging, may be affected. For
+     * example, consider the following stream:
+     * <pre>{@code
+     *     DoubleStream s = DoubleStream.of(1, 2, 3, 4);
+     *     long count = s.peek(System.out::println).count();
+     * }</pre>
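As an aside on the @apiNote being added here: on a sized source the count can be answered without running the pipeline, so the peek action may never execute. A minimal standalone sketch, assuming a JDK that applies this optimization (the class name CountNoTraversal is hypothetical):

    import java.util.stream.DoubleStream;

    public class CountNoTraversal {
        public static void main(String[] args) {
            // The source is SIZED, so count() may be answered directly from it;
            // the peek action may never run and nothing may print before the count.
            long count = DoubleStream.of(1, 2, 3, 4)
                                     .peek(d -> System.out.println("saw " + d))
                                     .count();
            System.out.println("count = " + count); // always prints: count = 4
        }
    }

Only the final line is guaranteed; whether the "saw ..." lines appear becomes an implementation detail.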
+ * The number of elements covered by the stream source is known and the + * intermediate operation, {@code peek}, does not inject into or remove + * elements from the stream (as may be the case for {@code flatMap} or + * {@code filter} operations). Thus the count is 4 and there is no need to + * execute the pipeline and, as a side-effect, print out the elements. + * * @return the count of elements in this stream */ long count(); diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/IntPipeline.java --- a/jdk/src/java.base/share/classes/java/util/stream/IntPipeline.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/IntPipeline.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -427,7 +427,7 @@ @Override public final long count() { - return mapToLong(e -> 1L).sum(); + return evaluate(ReduceOps.makeIntCounting()); } @Override diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/IntStream.java --- a/jdk/src/java.base/share/classes/java/util/stream/IntStream.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/IntStream.java Wed Jul 05 20:25:13 2017 +0200 @@ -504,6 +504,24 @@ * *
      * <p>This is a terminal operation.
      *
+     * @apiNote
+     * An implementation may choose to not execute the stream pipeline (either
+     * sequentially or in parallel) if it is capable of computing the count
+     * directly from the stream source. In such cases no source elements will
+     * be traversed and no intermediate operations will be evaluated.
+     * Behavioral parameters with side-effects, which are strongly discouraged
+     * except for harmless cases such as debugging, may be affected. For
+     * example, consider the following stream:
+     * <pre>{@code
+     *     IntStream s = IntStream.of(1, 2, 3, 4);
+     *     long count = s.peek(System.out::println).count();
+     * }</pre>
+ * The number of elements covered by the stream source is known and the + * intermediate operation, {@code peek}, does not inject into or remove + * elements from the stream (as may be the case for {@code flatMap} or + * {@code filter} operations). Thus the count is 4 and there is no need to + * execute the pipeline and, as a side-effect, print out the elements. + * * @return the count of elements in this stream */ long count(); diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/LongPipeline.java --- a/jdk/src/java.base/share/classes/java/util/stream/LongPipeline.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/LongPipeline.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -425,7 +425,7 @@ @Override public final long count() { - return map(e -> 1L).sum(); + return evaluate(ReduceOps.makeLongCounting()); } @Override diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/LongStream.java --- a/jdk/src/java.base/share/classes/java/util/stream/LongStream.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/LongStream.java Wed Jul 05 20:25:13 2017 +0200 @@ -509,6 +509,24 @@ * *
      * <p>This is a terminal operation.
      *
+     * @apiNote
+     * An implementation may choose to not execute the stream pipeline (either
+     * sequentially or in parallel) if it is capable of computing the count
+     * directly from the stream source. In such cases no source elements will
+     * be traversed and no intermediate operations will be evaluated.
+     * Behavioral parameters with side-effects, which are strongly discouraged
+     * except for harmless cases such as debugging, may be affected. For
+     * example, consider the following stream:
+     * <pre>{@code
+     *     LongStream s = LongStream.of(1, 2, 3, 4);
+     *     long count = s.peek(System.out::println).count();
+     * }</pre>
+ * The number of elements covered by the stream source is known and the + * intermediate operation, {@code peek}, does not inject into or remove + * elements from the stream (as may be the case for {@code flatMap} or + * {@code filter} operations). Thus the count is 4 and there is no need to + * execute the pipeline and, as a side-effect, print out the elements. + * * @return the count of elements in this stream */ long count(); diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/ReduceOps.java --- a/jdk/src/java.base/share/classes/java/util/stream/ReduceOps.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/ReduceOps.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -234,6 +234,40 @@ } /** + * Constructs a {@code TerminalOp} that counts the number of stream + * elements. If the size of the pipeline is known then count is the size + * and there is no need to evaluate the pipeline. If the size of the + * pipeline is non known then count is produced, via reduction, using a + * {@link CountingSink}. + * + * @param the type of the input elements + * @return a {@code TerminalOp} implementing the counting + */ + public static TerminalOp + makeRefCounting() { + return new ReduceOp>(StreamShape.REFERENCE) { + @Override + public CountingSink makeSink() { return new CountingSink.OfRef<>(); } + + @Override + public Long evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateSequential(helper, spliterator); + } + + @Override + public Long evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateParallel(helper, spliterator); + } + }; + } + + /** * Constructs a {@code TerminalOp} that implements a functional reduce on * {@code int} values. * @@ -370,6 +404,39 @@ } /** + * Constructs a {@code TerminalOp} that counts the number of stream + * elements. If the size of the pipeline is known then count is the size + * and there is no need to evaluate the pipeline. If the size of the + * pipeline is non known then count is produced, via reduction, using a + * {@link CountingSink}. 
+ * + * @return a {@code TerminalOp} implementing the counting + */ + public static TerminalOp + makeIntCounting() { + return new ReduceOp>(StreamShape.INT_VALUE) { + @Override + public CountingSink makeSink() { return new CountingSink.OfInt(); } + + @Override + public Long evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateSequential(helper, spliterator); + } + + @Override + public Long evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateParallel(helper, spliterator); + } + }; + } + + /** * Constructs a {@code TerminalOp} that implements a functional reduce on * {@code long} values. * @@ -506,6 +573,39 @@ } /** + * Constructs a {@code TerminalOp} that counts the number of stream + * elements. If the size of the pipeline is known then count is the size + * and there is no need to evaluate the pipeline. If the size of the + * pipeline is non known then count is produced, via reduction, using a + * {@link CountingSink}. + * + * @return a {@code TerminalOp} implementing the counting + */ + public static TerminalOp + makeLongCounting() { + return new ReduceOp>(StreamShape.LONG_VALUE) { + @Override + public CountingSink makeSink() { return new CountingSink.OfLong(); } + + @Override + public Long evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateSequential(helper, spliterator); + } + + @Override + public Long evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateParallel(helper, spliterator); + } + }; + } + + /** * Constructs a {@code TerminalOp} that implements a functional reduce on * {@code double} values. * @@ -642,6 +742,91 @@ } /** + * Constructs a {@code TerminalOp} that counts the number of stream + * elements. If the size of the pipeline is known then count is the size + * and there is no need to evaluate the pipeline. If the size of the + * pipeline is non known then count is produced, via reduction, using a + * {@link CountingSink}. 
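Each counting terminal op added here follows the same short-circuit: when the SIZED flag is known, the count comes straight from the spliterator's exact size without evaluating the pipeline; otherwise it falls back to a reduction through a CountingSink. A conceptual sketch of that idea using only public API; SizedCountSketch is an illustrative helper, not the ReduceOps code:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Spliterator;

    public class SizedCountSketch {
        // Mirrors the evaluateSequential/evaluateParallel short-circuit above:
        // a SIZED source reports its count without being traversed.
        static <T> long count(Spliterator<T> sp) {
            if (sp.hasCharacteristics(Spliterator.SIZED)) {
                return sp.getExactSizeIfKnown();       // no traversal needed
            }
            long[] counter = {0L};                     // fallback: count by traversal
            sp.forEachRemaining(t -> counter[0]++);
            return counter[0];
        }

        public static void main(String[] args) {
            List<String> l = Arrays.asList("A", "B", "C");
            System.out.println(count(l.spliterator())); // 3, without visiting elements
        }
    }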
+ * + * @return a {@code TerminalOp} implementing the counting + */ + public static TerminalOp + makeDoubleCounting() { + return new ReduceOp>(StreamShape.DOUBLE_VALUE) { + @Override + public CountingSink makeSink() { return new CountingSink.OfDouble(); } + + @Override + public Long evaluateSequential(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateSequential(helper, spliterator); + } + + @Override + public Long evaluateParallel(PipelineHelper helper, + Spliterator spliterator) { + if (StreamOpFlag.SIZED.isKnown(helper.getStreamAndOpFlags())) + return spliterator.getExactSizeIfKnown(); + return super.evaluateParallel(helper, spliterator); + } + }; + } + + /** + * A sink that counts elements + */ + static abstract class CountingSink + extends Box + implements AccumulatingSink> { + long count; + + @Override + public void begin(long size) { + count = 0L; + } + + @Override + public Long get() { + return count; + } + + @Override + public void combine(CountingSink other) { + count += other.count; + } + + static final class OfRef extends CountingSink { + @Override + public void accept(T t) { + count++; + } + } + + static final class OfInt extends CountingSink implements Sink.OfInt { + @Override + public void accept(int t) { + count++; + } + } + + static final class OfLong extends CountingSink implements Sink.OfLong { + @Override + public void accept(long t) { + count++; + } + } + + static final class OfDouble extends CountingSink implements Sink.OfDouble { + @Override + public void accept(double t) { + count++; + } + } + } + + /** * A type of {@code TerminalSink} that implements an associative reducing * operation on elements of type {@code T} and producing a result of type * {@code R}. @@ -652,7 +837,7 @@ */ private interface AccumulatingSink> extends TerminalSink { - public void combine(K other); + void combine(K other); } /** diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/ReferencePipeline.java --- a/jdk/src/java.base/share/classes/java/util/stream/ReferencePipeline.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/ReferencePipeline.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -523,10 +523,9 @@ @Override public final long count() { - return mapToLong(e -> 1L).sum(); + return evaluate(ReduceOps.makeRefCounting()); } - // /** diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/java/util/stream/Stream.java --- a/jdk/src/java.base/share/classes/java/util/stream/Stream.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/java/util/stream/Stream.java Wed Jul 05 20:25:13 2017 +0200 @@ -851,6 +851,25 @@ * *
      * <p>This is a terminal operation.
      *
+     * @apiNote
+     * An implementation may choose to not execute the stream pipeline (either
+     * sequentially or in parallel) if it is capable of computing the count
+     * directly from the stream source. In such cases no source elements will
+     * be traversed and no intermediate operations will be evaluated.
+     * Behavioral parameters with side-effects, which are strongly discouraged
+     * except for harmless cases such as debugging, may be affected. For
+     * example, consider the following stream:
+     * <pre>{@code
+     *     List<String> l = Arrays.asList("A", "B", "C", "D");
+     *     long count = l.stream().peek(System.out::println).count();
+     * }</pre>
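Conversely, when the side effect inside peek is actually wanted, the count has to come from a reduction that cannot be elided; the pre-change formulation (map each element to 1L and sum) still traverses the whole pipeline. A small illustrative sketch (the class name CountWithTraversal is hypothetical):

    import java.util.Arrays;
    import java.util.List;

    public class CountWithTraversal {
        public static void main(String[] args) {
            List<String> l = Arrays.asList("A", "B", "C", "D");

            // May skip the pipeline entirely on a sized source, so nothing may print here.
            long fast = l.stream().peek(System.out::println).count();

            // Summing a per-element 1 forces every element through the pipeline,
            // so peek's output is guaranteed to appear.
            long traversed = l.stream()
                              .peek(System.out::println)
                              .mapToLong(e -> 1L)
                              .sum();

            System.out.println(fast + " == " + traversed); // 4 == 4
        }
    }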
+ * The number of elements covered by the stream source, a {@code List}, is + * known and the intermediate operation, {@code peek}, does not inject into + * or remove elements from the stream (as may be the case for + * {@code flatMap} or {@code filter} operations). Thus the count is the + * size of the {@code List} and there is no need to execute the pipeline + * and, as a side-effect, print out the list elements. + * * @return the count of elements in this stream */ long count(); diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/javax/crypto/CipherInputStream.java --- a/jdk/src/java.base/share/classes/javax/crypto/CipherInputStream.java Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/javax/crypto/CipherInputStream.java Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -172,7 +172,6 @@ * -1 is returned. This method blocks until input data * is available, the end of the stream is detected, or an exception * is thrown. - *

* * @return the next byte of data, or -1 if the end of the * stream is reached. diff -r 17efac395638 -r a46e5922bb40 jdk/src/java.base/share/classes/javax/crypto/package.html --- a/jdk/src/java.base/share/classes/javax/crypto/package.html Thu Mar 19 16:13:40 2015 -0700 +++ b/jdk/src/java.base/share/classes/javax/crypto/package.html Wed Jul 05 20:25:13 2017 +0200 @@ -1,5 +1,5 @@