--- a/hotspot/agent/src/os/linux/ps_proc.c Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/agent/src/os/linux/ps_proc.c Fri Aug 13 10:55:42 2010 -0700
@@ -253,7 +253,11 @@
if (nwords > 5 && find_lib(ph, word[5]) == false) {
intptr_t base;
lib_info* lib;
+#ifdef _LP64
sscanf(word[0], "%lx", &base);
+#else
+ sscanf(word[0], "%x", &base);
+#endif
if ((lib = add_lib_info(ph, word[5], (uintptr_t)base)) == NULL)
continue; // ignore, add_lib_info prints error
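
The hunk above switches the sscanf conversion so its width matches intptr_t on both 32-bit and 64-bit builds. For reference, a minimal standalone sketch (not taken from the patch) that reaches the same result without an #ifdef by letting <inttypes.h> pick the format:

  // Illustrative only: parse a hex map base into a pointer-sized integer.
  #include <cinttypes>
  #include <cstdio>

  static bool parse_map_base(const char* word, uintptr_t* base) {
    // SCNxPTR expands to the right hex conversion for uintptr_t on both
    // 32-bit and 64-bit targets, so no _LP64 switch is needed.
    return std::sscanf(word, "%" SCNxPTR, base) == 1;
  }
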
--- a/hotspot/make/Makefile Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/Makefile Fri Aug 13 10:55:42 2010 -0700
@@ -85,14 +85,21 @@
C2_VM_TARGETS=product fastdebug optimized jvmg
KERNEL_VM_TARGETS=productkernel fastdebugkernel optimizedkernel jvmgkernel
ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero
+SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark
# JDK directory list
JDK_DIRS=bin include jre lib demo
all: all_product all_fastdebug
+ifndef BUILD_CLIENT_ONLY
all_product: product product1 productkernel docs export_product
all_fastdebug: fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
all_debug: jvmg jvmg1 jvmgkernel docs export_debug
+else
+all_product: product1 docs export_product
+all_fastdebug: fastdebug1 docs export_fastdebug
+all_debug: jvmg1 docs export_debug
+endif
all_optimized: optimized optimized1 optimizedkernel docs export_optimized
allzero: all_productzero all_fastdebugzero
@@ -101,6 +108,12 @@
all_debugzero: jvmgzero docs export_debug
all_optimizedzero: optimizedzero docs export_optimized
+allshark: all_productshark all_fastdebugshark
+all_productshark: productshark docs export_product
+all_fastdebugshark: fastdebugshark docs export_fastdebug
+all_debugshark: jvmgshark docs export_debug
+all_optimizedshark: optimizedshark docs export_optimized
+
# Do everything
world: all create_jdk
@@ -131,6 +144,10 @@
$(CD) $(GAMMADIR)/make; \
$(MAKE) VM_TARGET=$@ generic_buildzero $(ALT_OUT)
+$(SHARK_VM_TARGETS):
+ $(CD) $(GAMMADIR)/make; \
+ $(MAKE) VM_TARGET=$@ generic_buildshark $(ALT_OUT)
+
# Build compiler1 (client) rule, different for platforms
generic_build1:
$(MKDIR) -p $(OUTPUTDIR)
@@ -197,6 +214,12 @@
$(MAKE) -f $(ABS_OS_MAKEFILE) \
$(MAKE_ARGS) $(VM_TARGET)
+generic_buildshark:
+ $(MKDIR) -p $(OUTPUTDIR)
+ $(CD) $(OUTPUTDIR); \
+ $(MAKE) -f $(ABS_OS_MAKEFILE) \
+ $(MAKE_ARGS) $(VM_TARGET)
+
# Export file rule
generic_export: $(EXPORT_LIST)
export_product:
@@ -228,15 +251,22 @@
C2_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
KERNEL_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_kernel
ZERO_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_zero
+SHARK_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_shark
C1_DIR=$(C1_BASE_DIR)/$(VM_SUBDIR)
C2_DIR=$(C2_BASE_DIR)/$(VM_SUBDIR)
KERNEL_DIR=$(KERNEL_BASE_DIR)/$(VM_SUBDIR)
ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR)
+SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR)
# Misc files and generated files need to come from C1 or C2 area
ifeq ($(ZERO_BUILD), true)
+ifeq ($(SHARK_BUILD), true)
+ MISC_DIR=$(SHARK_DIR)
+ GEN_DIR=$(SHARK_BASE_DIR)/generated
+else
MISC_DIR=$(ZERO_DIR)
GEN_DIR=$(ZERO_BASE_DIR)/generated
+endif
else
ifeq ($(ARCH_DATA_MODEL), 32)
MISC_DIR=$(C1_DIR)
@@ -290,11 +320,20 @@
# Shared Library
ifneq ($(OSNAME),windows)
ifeq ($(ZERO_BUILD), true)
+ ifeq ($(SHARK_BUILD), true)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.so: $(SHARK_DIR)/%.so
+ $(install-file)
+$(EXPORT_SERVER_DIR)/%.so: $(SHARK_DIR)/%.so
+ $(install-file)
+ else
$(EXPORT_JRE_LIB_ARCH_DIR)/%.so: $(ZERO_DIR)/%.so
$(install-file)
$(EXPORT_SERVER_DIR)/%.so: $(ZERO_DIR)/%.so
$(install-file)
+ endif
else
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.so: $(C1_DIR)/%.so
+ $(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.so: $(C2_DIR)/%.so
$(install-file)
$(EXPORT_CLIENT_DIR)/%.so: $(C1_DIR)/%.so
@@ -348,6 +387,7 @@
$(RM) -r $(C2_DIR)
$(RM) -r $(KERNEL_DIR)
$(RM) -r $(ZERO_DIR)
+ $(RM) -r $(SHARK_DIR)
clean_export:
$(RM) -r $(EXPORT_PATH)
clean_jdk:
--- a/hotspot/make/defs.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/defs.make Fri Aug 13 10:55:42 2010 -0700
@@ -192,13 +192,16 @@
# Use uname output for SRCARCH, but deal with platform differences. If ARCH
# is not explicitly listed below, it is treated as x86.
- SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 zero,$(ARCH)))
+ SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
ARCH/ = x86
ARCH/sparc = sparc
ARCH/sparc64= sparc
ARCH/ia64 = ia64
ARCH/amd64 = x86
ARCH/x86_64 = x86
+ ARCH/ppc64 = ppc
+ ARCH/ppc = ppc
+ ARCH/arm = arm
ARCH/zero = zero
# BUILDARCH is usually the same as SRCARCH, except for sparcv9
@@ -223,6 +226,9 @@
LIBARCH/sparc = sparc
LIBARCH/sparcv9 = sparcv9
LIBARCH/ia64 = ia64
+ LIBARCH/ppc64 = ppc
+ LIBARCH/ppc = ppc
+ LIBARCH/arm = arm
LIBARCH/zero = $(ZERO_LIBARCH)
LP64_ARCH = sparcv9 amd64 ia64 zero
--- a/hotspot/make/linux/Makefile Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/Makefile Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -168,6 +168,13 @@
# profiledzero zero <os>_<arch>_zero/profiled
# productzero zero <os>_<arch>_zero/product
#
+# debugshark shark <os>_<arch>_shark/debug
+# fastdebugshark shark <os>_<arch>_shark/fastdebug
+# jvmgshark shark <os>_<arch>_shark/jvmg
+# optimizedshark shark <os>_<arch>_shark/optimized
+# profiledshark shark <os>_<arch>_shark/profiled
+# productshark shark <os>_<arch>_shark/product
+#
# What you get with each target:
#
# debug* - "thin" libjvm_g - debug info linked into the gamma_g launcher
@@ -191,12 +198,14 @@
SUBDIRS_TIERED = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
SUBDIRS_CORE = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
SUBDIRS_ZERO = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
+SUBDIRS_SHARK = $(addprefix $(OSNAME)_$(VARIANTARCH)_shark/,$(TARGETS))
TARGETS_C2 = $(TARGETS)
TARGETS_C1 = $(addsuffix 1,$(TARGETS))
TARGETS_TIERED = $(addsuffix tiered,$(TARGETS))
TARGETS_CORE = $(addsuffix core,$(TARGETS))
TARGETS_ZERO = $(addsuffix zero,$(TARGETS))
+TARGETS_SHARK = $(addsuffix shark,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) ARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
@@ -213,6 +222,7 @@
@echo " $(TARGETS_C1)"
@echo " $(TARGETS_CORE)"
@echo " $(TARGETS_ZERO)"
+ @echo " $(TARGETS_SHARK)"
checks: check_os_version check_j2se_version
@@ -266,6 +276,10 @@
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=zero VARIANTARCH=$(VARIANTARCH)
+$(SUBDIRS_SHARK): $(BUILDTREE_MAKE) platform_zero
+ $(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+ $(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH)
+
platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
@@ -306,11 +320,19 @@
cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install
endif
+$(TARGETS_SHARK): $(SUBDIRS_SHARK)
+ cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS)
+ cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && ./test_gamma
+ifdef INSTALL
+ cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
# Just build the tree, and nothing else:
tree: $(SUBDIRS_C2)
tree1: $(SUBDIRS_C1)
treecore: $(SUBDIRS_CORE)
treezero: $(SUBDIRS_ZERO)
+treeshark: $(SUBDIRS_SHARK)
# Doc target. This is the same for all build options.
# Hence create a docs directory beside ...$(ARCH)_[...]
@@ -327,20 +349,22 @@
zero: jvmgzero productzero
+shark: jvmgshark productshark
+
clean_docs:
rm -rf $(SUBDIR_DOCS)
-clean_compiler1 clean_compiler2 clean_core clean_zero:
+clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark:
rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@)
-clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_docs
+clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
#-------------------------------------------------------------------------------
-.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO)
-.PHONY: tree tree1 treecore treezero
-.PHONY: all compiler1 compiler2 core zero
-.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero docs clean_docs
+.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK)
+.PHONY: tree tree1 treecore treezero treeshark
+.PHONY: all compiler1 compiler2 core zero shark
+.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark docs clean_docs
.PHONY: checks check_os_version check_j2se_version
--- a/hotspot/make/linux/makefiles/build_vm_def.sh Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/build_vm_def.sh Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,12 @@
#!/bin/sh
-nm --defined-only $* | awk '
+# If we're cross compiling use that path for nm
+if [ "$ALT_COMPILER_PATH" != "" ]; then
+NM=$ALT_COMPILER_PATH/nm
+else
+NM=nm
+fi
+
+$NM --defined-only $* | awk '
{ if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
'
--- a/hotspot/make/linux/makefiles/buildtree.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/buildtree.make Fri Aug 13 10:55:42 2010 -0700
@@ -339,12 +339,16 @@
WRONG_DATA_MODE_MSG = \
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
+CROSS_COMPILING_MSG = \
+ echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
+
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
@echo Creating $@ ...
$(QUIETLY) ( \
echo '#!/bin/sh'; \
$(BUILDTREE_COMMENT); \
echo '. ./env.sh'; \
+ echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
echo "then"; \
--- a/hotspot/make/linux/makefiles/defs.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/defs.make Fri Aug 13 10:55:42 2010 -0700
@@ -98,6 +98,22 @@
HS_ARCH = x86
endif
+# ARM
+ifeq ($(ARCH), arm)
+ ARCH_DATA_MODEL = 32
+ PLATFORM = linux-arm
+ VM_PLATFORM = linux_arm
+ HS_ARCH = arm
+endif
+
+# PPC
+ifeq ($(ARCH), ppc)
+ ARCH_DATA_MODEL = 32
+ PLATFORM = linux-ppc
+ VM_PLATFORM = linux_ppc
+ HS_ARCH = ppc
+endif
+
JDK_INCLUDE_SUBDIR=linux
# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
@@ -107,22 +123,32 @@
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.so
+EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
-EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+ifndef BUILD_CLIENT_ONLY
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so
+endif
+
ifneq ($(ZERO_BUILD), true)
ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
- EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar
- else
- ifeq ($(ARCH),ia64)
- else
- EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
- EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar
- endif
endif
endif
+
+# Serviceability Binaries
+# No SA Support for PPC, IA64, ARM or zero
+ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so \
+ $(EXPORT_LIB_DIR)/sa-jdi.jar
+ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so \
+ $(EXPORT_LIB_DIR)/sa-jdi.jar
+ADD_SA_BINARIES/ppc =
+ADD_SA_BINARIES/ia64 =
+ADD_SA_BINARIES/arm =
+ADD_SA_BINARIES/zero =
+
+EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
+
+
--- a/hotspot/make/linux/makefiles/gcc.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/gcc.make Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,14 @@
#------------------------------------------------------------------------
# CC, CPP & AS
+ifdef ALT_COMPILER_PATH
+CPP = $(ALT_COMPILER_PATH)/g++
+CC = $(ALT_COMPILER_PATH)/gcc
+else
CPP = g++
CC = gcc
+endif
+
AS = $(CC) -c
# -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
@@ -55,6 +61,9 @@
ifeq ($(ZERO_BUILD), true)
CFLAGS += $(LIBFFI_CFLAGS)
endif
+ifeq ($(SHARK_BUILD), true)
+CFLAGS += $(LLVM_CFLAGS)
+endif
CFLAGS += $(VM_PICFLAG)
CFLAGS += -fno-rtti
CFLAGS += -fno-exceptions
@@ -67,18 +76,31 @@
ARCHFLAG/ia64 =
ARCHFLAG/sparc = -m32 -mcpu=v9
ARCHFLAG/sparcv9 = -m64 -mcpu=v9
+ARCHFLAG/arm = -fsigned-char
ARCHFLAG/zero = $(ZERO_ARCHFLAG)
+ifndef E500V2
+ARCHFLAG/ppc = -mcpu=powerpc
+endif
CFLAGS += $(ARCHFLAG)
AOUT_FLAGS += $(ARCHFLAG)
LFLAGS += $(ARCHFLAG)
ASFLAGS += $(ARCHFLAG)
+ifdef E500V2
+CFLAGS += -DE500V2
+endif
+
# Use C++ Interpreter
ifdef CC_INTERP
CFLAGS += -DCC_INTERP
endif
+# Build for embedded targets
+ifdef JAVASE_EMBEDDED
+ CFLAGS += -DJAVASE_EMBEDDED
+endif
+
# Keep temporary files (.ii, .s)
ifdef NEED_ASM
CFLAGS += -save-temps
@@ -171,6 +193,8 @@
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
+DEBUG_CFLAGS/arm = -g
+DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
DEBUG_CFLAGS += -gstabs
@@ -181,3 +205,15 @@
DEBUG_CFLAGS = -g
CFLAGS += $(DEBUG_CFLAGS)
endif
+
+# If we are building HEADLESS, pass on to VM
+# so it can set the java.awt.headless property
+ifdef HEADLESS
+CFLAGS += -DHEADLESS
+endif
+
+# We are building Embedded for a small device
+# favor code space over speed
+ifdef MINIMIZE_RAM_USAGE
+CFLAGS += -DMINIMIZE_RAM_USAGE
+endif
--- a/hotspot/make/linux/makefiles/product.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/product.make Fri Aug 13 10:55:42 2010 -0700
@@ -46,7 +46,11 @@
# use -g to strip library as -x will discard its symbol table; -x is fine for
# executables.
-STRIP = strip
+ifdef CROSS_COMPILE_ARCH
+ STRIP = $(ALT_COMPILER_PATH)/strip
+else
+ STRIP = strip
+endif
STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
STRIP_AOUT = $(STRIP) -x $@ || exit 1;
--- a/hotspot/make/linux/makefiles/sa.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/sa.make Fri Aug 13 10:55:42 2010 -0700
@@ -55,10 +55,13 @@
SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
# if $(AGENT_DIR) does not exist, we don't build SA
-# also, we don't build SA on Itanium or zero.
+# also, we don't build SA on Itanium, PowerPC, ARM or zero.
all:
- if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" -a "$(SRCARCH)" != "zero" ] ; then \
+ if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
+ -a "$(SRCARCH)" != "arm" \
+ -a "$(SRCARCH)" != "ppc" \
+ -a "$(SRCARCH)" != "zero" ] ; then \
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
--- a/hotspot/make/linux/makefiles/saproc.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/saproc.make Fri Aug 13 10:55:42 2010 -0700
@@ -53,10 +53,10 @@
endif
# if $(AGENT_DIR) does not exist, we don't build SA
-# also, we don't build SA on Itanium or zero.
+# also, we don't build SA on Itanium, PPC, ARM or zero.
checkAndBuildSA:
- $(QUIETLY) if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" -a "$(SRCARCH)" != "zero" ] ; then \
+ $(QUIETLY) if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" -a "$(SRCARCH)" != "arm" -a "$(SRCARCH)" != "ppc" -a "$(SRCARCH)" != "zero" ] ; then \
$(MAKE) -f vm.make $(LIBSAPROC); \
fi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/make/linux/makefiles/shark.make Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2008, 2010 Red Hat, Inc.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Sets make macros for making Shark version of VM
+
+TYPE = SHARK
+
+VM_SUBDIR = server
+
+CFLAGS += -DSHARK
--- a/hotspot/make/linux/makefiles/top.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/top.make Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,7 @@
Include_DBs/COMPILER2 = $(Include_DBs/CORE) $(VM)/includeDB_compiler2
Include_DBs/TIERED = $(Include_DBs/CORE) $(VM)/includeDB_compiler1 $(VM)/includeDB_compiler2
Include_DBs/ZERO = $(Include_DBs/CORE) $(VM)/includeDB_zero
+Include_DBs/SHARK = $(Include_DBs/ZERO) $(VM)/includeDB_shark
Include_DBs = $(Include_DBs/$(TYPE))
Cached_plat = $(GENERATED)/platform.current
--- a/hotspot/make/linux/makefiles/vm.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/linux/makefiles/vm.make Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
#
-# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -98,6 +98,7 @@
# Extra flags from gnumake's invocation or environment
CFLAGS += $(EXTRA_CFLAGS)
+LFLAGS += $(EXTRA_CFLAGS)
LIBS += -lm -ldl -lpthread
@@ -136,10 +137,14 @@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
-ifeq ($(ZERO_LIBARCH), ppc64)
+ifeq ($(SHARK_BUILD), true)
STATIC_CXX = false
else
- STATIC_CXX = true
+ ifeq ($(ZERO_LIBARCH), ppc64)
+ STATIC_CXX = false
+ else
+ STATIC_CXX = true
+ endif
endif
ifeq ($(LINK_INTO),AOUT)
@@ -167,6 +172,10 @@
ifeq ($(ZERO_BUILD), true)
LIBS_VM += $(LIBFFI_LIBS)
endif
+ifeq ($(SHARK_BUILD), true)
+ LFLAGS_VM += $(LLVM_LDFLAGS)
+ LIBS_VM += $(LLVM_LIBS)
+endif
LINK_VM = $(LINK_LIB.c)
@@ -210,15 +219,17 @@
$(LINK_LIB.CC/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
- if [ -x /usr/sbin/selinuxenabled ] ; then \
- /usr/sbin/selinuxenabled; \
- if [ $$? = 0 ] ; then \
- /usr/bin/chcon -t textrel_shlib_t $@; \
- if [ $$? != 0 ]; then \
- echo "ERROR: Cannot chcon $@"; \
- fi \
- fi \
- fi \
+ if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then \
+ if [ -x /usr/sbin/selinuxenabled ] ; then \
+ /usr/sbin/selinuxenabled; \
+ if [ $$? = 0 ] ; then \
+ /usr/bin/chcon -t textrel_shlib_t $@; \
+ if [ $$? != 0 ]; then \
+ echo "ERROR: Cannot chcon $@"; \
+ fi \
+ fi \
+ fi \
+ fi \
}
DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
--- a/hotspot/make/solaris/makefiles/defs.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/solaris/makefiles/defs.make Fri Aug 13 10:55:42 2010 -0700
@@ -70,20 +70,24 @@
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.so
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+ifneq ($(BUILD_CLIENT_ONLY),true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.so
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.so
+endif
ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so
- EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so
- EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
+ EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.so
- EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_dtrace.so
+ ifneq ($(BUILD_CLIENT_ONLY), true)
+ EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_db.so
+ EXPORT_LIST += $(EXPORT_SERVER_DIR)/64/libjvm_dtrace.so
+ endif
endif
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
--- a/hotspot/make/solaris/makefiles/sparcWorks.make Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make Fri Aug 13 10:55:42 2010 -0700
@@ -145,11 +145,20 @@
OPT_CFLAGS/O2=-xO2
OPT_CFLAGS/NOOPT=-xO1
+#################################################
+# Begin current (>=5.9) Forte compiler options #
+#################################################
+
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
ifeq ($(Platform_arch), x86)
OPT_CFLAGS/NO_TAIL_CALL_OPT = -Wu,-O~yz
OPT_CCFLAGS/NO_TAIL_CALL_OPT = -Qoption ube -O~yz
+OPT_CFLAGS/stubGenerator_x86_32.o = $(OPT_CFLAGS) -xspace
+OPT_CFLAGS/stubGenerator_x86_64.o = $(OPT_CFLAGS) -xspace
endif # Platform_arch == x86
+ifeq ("${Platform_arch}", "sparc")
+OPT_CFLAGS/stubGenerator_sparc.o = $(OPT_CFLAGS) -xspace
+endif
endif # COMPILER_REV_NUMERIC >= 509
#################################################
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -626,7 +626,7 @@
}
// This code sequence is relocatable to any address, even on LP64.
-void MacroAssembler::jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
+void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
@@ -672,7 +672,7 @@
}
}
-void MacroAssembler::jump(AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
+void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
jumpl(addrlit, temp, G0, offset, file, line);
}
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1974,12 +1974,12 @@
// address pseudos: make these names unlike instruction names to avoid confusion
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
- inline void load_contents(AddressLiteral& addrlit, Register d, int offset = 0);
- inline void load_ptr_contents(AddressLiteral& addrlit, Register d, int offset = 0);
- inline void store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
- inline void store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset = 0);
- inline void jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
- inline void jump_to(AddressLiteral& addrlit, Register temp, int offset = 0);
+ inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+ inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+ inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
+ inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
+ inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
+ inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
// ring buffer traceable jumps
@@ -1987,8 +1987,8 @@
void jmp2( Register r1, Register r2, const char* file, int line );
void jmp ( Register r1, int offset, const char* file, int line );
- void jumpl(AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
- void jump (AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
+ void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
+ void jump (const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
// argument pseudos:
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -650,28 +650,28 @@
}
-inline void MacroAssembler::load_contents(AddressLiteral& addrlit, Register d, int offset) {
+inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
sethi(addrlit, d);
ld(d, addrlit.low10() + offset, d);
}
-inline void MacroAssembler::load_ptr_contents(AddressLiteral& addrlit, Register d, int offset) {
+inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
sethi(addrlit, d);
ld_ptr(d, addrlit.low10() + offset, d);
}
-inline void MacroAssembler::store_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
+inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
sethi(addrlit, temp);
st(s, temp, addrlit.low10() + offset);
}
-inline void MacroAssembler::store_ptr_contents(Register s, AddressLiteral& addrlit, Register temp, int offset) {
+inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
sethi(addrlit, temp);
st_ptr(s, temp, addrlit.low10() + offset);
@@ -679,7 +679,7 @@
// This code sequence is relocatable to any address, even on LP64.
-inline void MacroAssembler::jumpl_to(AddressLiteral& addrlit, Register temp, Register d, int offset) {
+inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
@@ -688,7 +688,7 @@
}
-inline void MacroAssembler::jump_to(AddressLiteral& addrlit, Register temp, int offset) {
+inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
jumpl_to(addrlit, temp, G0, offset);
}
--- a/hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.inline.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/bytecodeInterpreter_sparc.inline.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -236,19 +236,19 @@
}
inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
- return op1 << op2;
+ return op1 << (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
- return op1 >> op2; // QQ op2 & 0x1f??
+ return op1 >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
return op1 - op2;
}
-inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
- return ((juint) op1) >> op2; // QQ op2 & 0x1f??
+inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
+ return ((juint) op1) >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
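
The masking with 0x1f answers the old "QQ op2 & 0x1f??" questions: the JVM's ishl/ishr/iushr use only the low five bits of the shift count, while shifting a 32-bit C++ int by 32 or more is undefined behaviour. A minimal sketch of the required semantics (illustrative, not HotSpot code):

  #include <cstdint>

  // Java looks at only the low five bits of the count for 32-bit shifts.
  static int32_t java_ishl (int32_t v, int32_t s) { return v << (s & 0x1f); }
  static int32_t java_ishr (int32_t v, int32_t s) { return v >> (s & 0x1f); }
  static int32_t java_iushr(int32_t v, int32_t s) {
    return (int32_t)((uint32_t)v >> (s & 0x1f));   // logical (unsigned) shift
  }
  // Example: java_ishl(1, 33) == 2, matching the bytecode result of 1 << 33.
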
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -409,7 +409,7 @@
LIR_Opr lock = FrameMap::G1_opr;
LIR_Opr hdr = FrameMap::G3_opr;
LIR_Opr obj_temp = FrameMap::G4_opr;
- monitor_exit(obj_temp, lock, hdr, x->monitor_no());
+ monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1031,3 +1031,7 @@
#undef __
#define __ masm->
+
+const char *Runtime1::pd_name_for_address(address entry) {
+ return "<unknown function>";
+}
--- a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -56,14 +56,18 @@
}
-#ifdef _LP64
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
Argument jni_arg(jni_offset(), false);
+#ifdef _LP64
FloatRegister Rtmp = F0;
__ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
__ store_float_argument(Rtmp, jni_arg);
+#else
+ Register Rtmp = O0;
+ __ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
+ __ store_argument(Rtmp, jni_arg);
+#endif
}
-#endif
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
@@ -185,6 +189,13 @@
_from -= 2*Interpreter::stackElementSize;
add_signature( non_float );
}
+
+ virtual void pass_float() {
+ *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
+ _from -= Interpreter::stackElementSize;
+ add_signature( non_float );
+ }
+
#endif // _LP64
virtual void add_signature( intptr_t sig_type ) {
--- a/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -76,6 +76,8 @@
void set_last_Java_sp(intptr_t* sp) { _last_Java_sp = sp; }
+ address last_Java_pc(void) { return _last_Java_pc; }
+
// These are only used by friends
private:
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -3236,12 +3236,14 @@
__ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
__ get_cpool_and_tags(Rscratch, G3_scratch);
// make sure the class we're about to instantiate has been resolved
+ // This is done before loading instanceKlass to be consistent with the order
+ // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
__ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
__ ldub(G3_scratch, Roffset, G3_scratch);
__ cmp(G3_scratch, JVM_CONSTANT_Class);
__ br(Assembler::notEqual, false, Assembler::pn, slow_case);
__ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
-
+ // get instanceKlass
//__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
__ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
__ ld_ptr(Rscratch, Roffset, RinstanceKlass);
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -7151,7 +7151,7 @@
subptr(t1, typeArrayOopDesc::header_size(T_INT));
addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
- movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
+ movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
// set klass to intArrayKlass
// dubious reloc why not an oop reloc?
movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
@@ -7568,21 +7568,27 @@
// Scan RCX words at [RDI] for an occurrence of RAX.
// Set NZ/Z based on last compare.
+ // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
+ // not change flags (only scas instruction which is repeated sets flags).
+ // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
#ifdef _LP64
// This part is tricky, as values in supers array could be 32 or 64 bit wide
// and we store values in objArrays always encoded, thus we need to encode
// the value of rax before repne. Note that rax is dead after the repne.
if (UseCompressedOops) {
- encode_heap_oop_not_null(rax);
+ encode_heap_oop_not_null(rax); // Changes flags.
// The superclass is never null; it would be a basic system error if a null
// pointer were to sneak in here. Note that we have already loaded the
// Klass::super_check_offset from the super_klass in the fast path,
// so if there is a null in that register, we are already in the afterlife.
+ testl(rax,rax); // Set Z = 0
repne_scanl();
} else
#endif // _LP64
+ {
+ testptr(rax,rax); // Set Z = 0
repne_scan();
-
+ }
// Unspill the temp. registers:
if (pushed_rdi) pop(rdi);
if (pushed_rcx) pop(rcx);
@@ -8257,30 +8263,35 @@
}
}
+#ifdef ASSERT
+void MacroAssembler::verify_heapbase(const char* msg) {
+ assert (UseCompressedOops, "should be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ if (CheckCompressedOops) {
+ Label ok;
+ push(rscratch1); // cmpptr trashes rscratch1
+ cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
+ jcc(Assembler::equal, ok);
+ stop(msg);
+ bind(ok);
+ pop(rscratch1);
+ }
+}
+#endif
+
// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register r) {
- assert (UseCompressedOops, "should be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
+#endif
+ verify_oop(r, "broken oop in encode_heap_oop");
if (Universe::narrow_oop_base() == NULL) {
- verify_oop(r, "broken oop in encode_heap_oop");
if (Universe::narrow_oop_shift() != 0) {
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
shrq(r, LogMinObjAlignmentInBytes);
}
return;
}
-#ifdef ASSERT
- if (CheckCompressedOops) {
- Label ok;
- push(rscratch1); // cmpptr trashes rscratch1
- cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
- jcc(Assembler::equal, ok);
- stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
- bind(ok);
- pop(rscratch1);
- }
-#endif
- verify_oop(r, "broken oop in encode_heap_oop");
testq(r, r);
cmovq(Assembler::equal, r, r12_heapbase);
subq(r, r12_heapbase);
@@ -8288,9 +8299,8 @@
}
void MacroAssembler::encode_heap_oop_not_null(Register r) {
- assert (UseCompressedOops, "should be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
if (CheckCompressedOops) {
Label ok;
testq(r, r);
@@ -8310,9 +8320,8 @@
}
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
- assert (UseCompressedOops, "should be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
if (CheckCompressedOops) {
Label ok;
testq(src, src);
@@ -8335,40 +8344,21 @@
}
void MacroAssembler::decode_heap_oop(Register r) {
- assert (UseCompressedOops, "should be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
+#endif
if (Universe::narrow_oop_base() == NULL) {
if (Universe::narrow_oop_shift() != 0) {
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
shlq(r, LogMinObjAlignmentInBytes);
}
- verify_oop(r, "broken oop in decode_heap_oop");
- return;
- }
-#ifdef ASSERT
- if (CheckCompressedOops) {
- Label ok;
- push(rscratch1);
- cmpptr(r12_heapbase,
- ExternalAddress((address)Universe::narrow_oop_base_addr()));
- jcc(Assembler::equal, ok);
- stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
- bind(ok);
- pop(rscratch1);
- }
-#endif
-
- Label done;
- shlq(r, LogMinObjAlignmentInBytes);
- jccb(Assembler::equal, done);
- addq(r, r12_heapbase);
-#if 0
- // alternate decoding probably a wash.
- testq(r, r);
- jccb(Assembler::equal, done);
- leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
-#endif
- bind(done);
+ } else {
+ Label done;
+ shlq(r, LogMinObjAlignmentInBytes);
+ jccb(Assembler::equal, done);
+ addq(r, r12_heapbase);
+ bind(done);
+ }
verify_oop(r, "broken oop in decode_heap_oop");
}
@@ -8410,9 +8400,11 @@
addq(dst, r12_heapbase);
}
}
- } else if (dst != src) {
+ } else {
assert (Universe::narrow_oop_base() == NULL, "sanity");
- movq(dst, src);
+ if (dst != src) {
+ movq(dst, src);
+ }
}
}
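
The refactoring folds the duplicated CheckCompressedOops checks into verify_heapbase() without changing the encode/decode arithmetic itself. For orientation, a sketch of that arithmetic in plain C++ (an approximation of what the emitted code computes, assuming the usual base/shift scheme):

  #include <cstdint>

  // narrow oop = (oop - base) >> shift, with NULL mapping to 0 both ways.
  static uint32_t encode_oop(uintptr_t oop, uintptr_t base, int shift) {
    if (base == 0) return (uint32_t)(oop >> shift);   // zero-based heap
    if (oop == 0)  return 0;                          // NULL stays NULL
    return (uint32_t)((oop - base) >> shift);
  }

  static uintptr_t decode_oop(uint32_t narrow, uintptr_t base, int shift) {
    if (narrow == 0) return 0;                        // NULL stays NULL
    return base + ((uintptr_t)narrow << shift);       // base is 0 when zero-based
  }
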
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1714,6 +1714,9 @@
// if heap base register is used - reinit it with the correct value
void reinit_heapbase();
+
+ DEBUG_ONLY(void verify_heapbase(const char* msg);)
+
#endif // _LP64
// Int division/remainder for Java
--- a/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -236,11 +236,11 @@
}
inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
- return op1 << op2;
+ return op1 << op2;
}
inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
- return op1 >> op2; // QQ op2 & 0x1f??
+ return op1 >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
@@ -248,7 +248,7 @@
}
inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
- return ((juint) op1) >> op2; // QQ op2 & 0x1f??
+ return ((juint) op1) >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -349,7 +349,7 @@
LIR_Opr lock = new_register(T_INT);
LIR_Opr obj_temp = new_register(T_INT);
set_no_result(x);
- monitor_exit(obj_temp, lock, syncTempOpr(), x->monitor_no());
+ monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1779,3 +1779,7 @@
}
#undef __
+
+const char *Runtime1::pd_name_for_address(address entry) {
+ return "<unknown function>";
+}
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -575,8 +575,8 @@
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
#ifdef CC_INTERP
- // Needed for JVMTI. The result should always be in the interpreterState object
- assert(false, "NYI");
+ // Needed for JVMTI. The result should always be in the
+ // interpreterState object
interpreterState istate = get_interpreterState();
#endif // CC_INTERP
assert(is_interpreted_frame(), "interpreted frame expected");
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -34,6 +34,10 @@
move(offset(), jni_offset() + 1);
}
+void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
+ move(offset(), jni_offset() + 1);
+}
+
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
move(offset(), jni_offset() + 2);
move(offset() + 1, jni_offset() + 1);
@@ -91,6 +95,11 @@
_from -= Interpreter::stackElementSize;
}
+ virtual void pass_float() {
+ *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
+ _from -= Interpreter::stackElementSize;
+ }
+
virtual void pass_long() {
_to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
_to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
--- a/hotspot/src/cpu/x86/vm/javaFrameAnchor_x86.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/javaFrameAnchor_x86.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -66,6 +66,8 @@
intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
+ address last_Java_pc(void) { return _last_Java_pc; }
+
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -3112,22 +3112,25 @@
transition(vtos, atos);
__ get_unsigned_2_byte_index_at_bcp(rdx, 1);
Label slow_case;
+ Label slow_case_no_pop;
Label done;
Label initialize_header;
Label initialize_object; // including clearing the fields
Label allocate_shared;
__ get_cpool_and_tags(rcx, rax);
+
+ // Make sure the class we're about to instantiate has been resolved.
+ // This is done before loading instanceKlass to be consistent with the order
+ // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
+ const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
+ __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
+ __ jcc(Assembler::notEqual, slow_case_no_pop);
+
// get instanceKlass
__ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
__ push(rcx); // save the contexts of klass for initializing the header
- // make sure the class we're about to instantiate has been resolved.
- // Note: slow_case does a pop of stack, which is why we loaded class/pushed above
- const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
- __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
- __ jcc(Assembler::notEqual, slow_case);
-
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
@@ -3255,6 +3258,7 @@
// slow case
__ bind(slow_case);
__ pop(rcx); // restore stack pointer to what it was when we came in.
+ __ bind(slow_case_no_pop);
__ get_constant_pool(rax);
__ get_unsigned_2_byte_index_at_bcp(rdx, 1);
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
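
Checking the constant-pool tag before loading the instanceKlass mirrors the order in which constantPoolOopDesc::klass_at_put publishes them, so a reader that sees the resolved tag can rely on the klass entry. As an analogy only (not HotSpot code), the same publish/observe discipline expressed with C++ atomics:

  #include <atomic>

  struct Entry {
    void*            klass;   // written first by the resolving thread
    std::atomic<int> tag;     // then published (e.g. a "resolved class" tag)
  };

  static void* try_fast_path(Entry& e, int resolved_tag) {
    if (e.tag.load(std::memory_order_acquire) != resolved_tag)
      return nullptr;         // not resolved yet: take the slow path
    return e.klass;           // safe: the tag was stored after the klass
  }
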
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -3126,18 +3126,18 @@
Label allocate_shared;
__ get_cpool_and_tags(rsi, rax);
- // get instanceKlass
- __ movptr(rsi, Address(rsi, rdx,
- Address::times_8, sizeof(constantPoolOopDesc)));
-
- // make sure the class we're about to instantiate has been
- // resolved. Note: slow_case does a pop of stack, which is why we
- // loaded class/pushed above
+ // Make sure the class we're about to instantiate has been resolved.
+ // This is done before loading instanceKlass to be consistent with the order
+ // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
__ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
JVM_CONSTANT_Class);
__ jcc(Assembler::notEqual, slow_case);
+ // get instanceKlass
+ __ movptr(rsi, Address(rsi, rdx,
+ Address::times_8, sizeof(constantPoolOopDesc)));
+
// make sure klass is initialized & doesn't have finalizer
// make sure klass is fully initialized
__ cmpl(Address(rsi,
--- a/hotspot/src/cpu/zero/vm/disassembler_zero.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/disassembler_zero.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
+ * Copyright 2007, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,13 +23,10 @@
*
*/
-// The disassembler prints out zero code annotated
-// with Java specific information.
-
static int pd_instruction_alignment() {
- ShouldNotCallThis();
+ return 1;
}
static const char* pd_cpu_opts() {
- ShouldNotCallThis();
+ return "";
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/zero/vm/shark_globals_zero.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Set the default values for platform dependent flags used by the
+// Shark compiler. See globals.hpp for details of what they do.
+
+define_pd_global(bool, BackgroundCompilation, true );
+define_pd_global(bool, UseTLAB, true );
+define_pd_global(bool, ResizeTLAB, true );
+define_pd_global(bool, InlineIntrinsics, false);
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps, false);
+define_pd_global(bool, UseOnStackReplacement, true );
+define_pd_global(bool, TieredCompilation, false);
+
+define_pd_global(intx, CompileThreshold, 1500);
+define_pd_global(intx, Tier2CompileThreshold, 1500);
+define_pd_global(intx, Tier3CompileThreshold, 2500);
+define_pd_global(intx, Tier4CompileThreshold, 4500);
+
+define_pd_global(intx, BackEdgeThreshold, 100000);
+define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
+define_pd_global(intx, Tier3BackEdgeThreshold, 100000);
+define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
+
+define_pd_global(intx, OnStackReplacePercentage, 933 );
+define_pd_global(intx, FreqInlineSize, 325 );
+define_pd_global(intx, InlineSmallCode, 1000 );
+define_pd_global(intx, NewRatio, 12 );
+define_pd_global(intx, NewSizeThreadIncrease, 4*K );
+define_pd_global(intx, InitialCodeCacheSize, 160*K);
+define_pd_global(intx, ReservedCodeCacheSize, 32*M );
+define_pd_global(bool, ProfileInterpreter, false);
+define_pd_global(intx, CodeCacheExpansionSize, 32*K );
+define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
+define_pd_global(uintx, PermSize, 12*M );
+define_pd_global(uintx, MaxPermSize, 64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t, MaxRAM, 1ULL*G);
+define_pd_global(bool, CICompileOSR, true );
--- a/hotspot/src/os/linux/launcher/java_md.c Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/linux/launcher/java_md.c Fri Aug 13 10:55:42 2010 -0700
@@ -79,6 +79,10 @@
# define ARCH "i386"
# elif defined(__sparc)
# define ARCH "sparc"
+# elif defined(arm)
+# define ARCH "arm"
+# elif defined(PPC)
+# define ARCH "ppc"
# endif
#endif /* _LP64 */
--- a/hotspot/src/os/linux/vm/attachListener_linux.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/linux/vm/attachListener_linux.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -32,11 +32,15 @@
#include <sys/un.h>
#include <sys/stat.h>
+#ifndef UNIX_PATH_MAX
+#define UNIX_PATH_MAX sizeof(((struct sockaddr_un *)0)->sun_path)
+#endif
+
// The attach mechanism on Linux uses a UNIX domain socket. An attach listener
// thread is created at startup or is created on-demand via a signal from
// the client tool. The attach listener creates a socket and binds it to a file
// in the filesystem. The attach listener then acts as a simple (single-
-// threaded) server - tt waits for a client to connect, reads the request,
+// threaded) server - it waits for a client to connect, reads the request,
// executes it, and returns the response to the client via the socket
// connection.
//
@@ -54,7 +58,7 @@
class LinuxAttachListener: AllStatic {
private:
// the path to which we bind the UNIX domain socket
- static char _path[PATH_MAX+1];
+ static char _path[UNIX_PATH_MAX];
static bool _has_path;
// the file descriptor for the listening socket
@@ -64,8 +68,8 @@
if (path == NULL) {
_has_path = false;
} else {
- strncpy(_path, path, PATH_MAX);
- _path[PATH_MAX] = '\0';
+ strncpy(_path, path, UNIX_PATH_MAX);
+ _path[UNIX_PATH_MAX-1] = '\0';
_has_path = true;
}
}
@@ -113,7 +117,7 @@
};
// statics
-char LinuxAttachListener::_path[PATH_MAX+1];
+char LinuxAttachListener::_path[UNIX_PATH_MAX];
bool LinuxAttachListener::_has_path;
int LinuxAttachListener::_listener = -1;
@@ -163,54 +167,53 @@
// Initialization - create a listener socket and bind it to a file
int LinuxAttachListener::init() {
- char path[PATH_MAX+1]; // socket file
- int listener; // listener socket (file descriptor)
+ char path[UNIX_PATH_MAX]; // socket file
+ char initial_path[UNIX_PATH_MAX]; // socket file during setup
+ int listener; // listener socket (file descriptor)
// register function to cleanup
::atexit(listener_cleanup);
+ int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
+ os::get_temp_directory(), os::current_process_id());
+ if (n <= (int)UNIX_PATH_MAX) {
+ n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
+ }
+ if (n > (int)UNIX_PATH_MAX) {
+ return -1;
+ }
+
// create the listener socket
listener = ::socket(PF_UNIX, SOCK_STREAM, 0);
if (listener == -1) {
return -1;
}
- int res = -1;
+ // bind socket
struct sockaddr_un addr;
addr.sun_family = AF_UNIX;
-
- // FIXME: Prior to b39 the tool-side API expected to find the well
- // known file in the working directory. To allow this libjvm.so work with
- // a pre-b39 SDK we create it in the working directory if
- // +StartAttachListener is used is used. All unit tests for this feature
- // currently used this flag. Once b39 SDK has been promoted we can remove
- // this code.
- if (StartAttachListener) {
- sprintf(path, ".java_pid%d", os::current_process_id());
- strcpy(addr.sun_path, path);
- ::unlink(path);
- res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
- }
- if (res == -1) {
- snprintf(path, PATH_MAX+1, "%s/.java_pid%d",
- os::get_temp_directory(), os::current_process_id());
- strcpy(addr.sun_path, path);
- ::unlink(path);
- res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
- }
+ strcpy(addr.sun_path, initial_path);
+ ::unlink(initial_path);
+ int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
if (res == -1) {
RESTARTABLE(::close(listener), res);
return -1;
}
- set_path(path);
- // put in listen mode and set permission
- if ((::listen(listener, 5) == -1) || (::chmod(path, S_IREAD|S_IWRITE) == -1)) {
+ // put in listen mode, set permissions, and rename into place
+ res = ::listen(listener, 5);
+ if (res == 0) {
+ RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
+ if (res == 0) {
+ res = ::rename(initial_path, path);
+ }
+ }
+ if (res == -1) {
RESTARTABLE(::close(listener), res);
- ::unlink(path);
- set_path(NULL);
+ ::unlink(initial_path);
return -1;
}
+ set_path(path);
set_listener(listener);
return 0;
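
The rework above binds the socket under a temporary ".tmp" name, makes it fully ready (listen plus chmod), and only then renames it to the well-known .java_pid path, so a client can never observe a file that is bound but not yet listening. A condensed standalone sketch of that sequence (illustrative, with error handling reduced to early returns):

  #include <sys/socket.h>
  #include <sys/stat.h>
  #include <sys/un.h>
  #include <unistd.h>
  #include <cstdio>
  #include <cstring>

  static int create_listener(const char* final_path) {
    char tmp_path[sizeof(((sockaddr_un*)0)->sun_path)];
    if (std::snprintf(tmp_path, sizeof(tmp_path), "%s.tmp", final_path)
        >= (int)sizeof(tmp_path)) return -1;            // name would not fit

    int fd = socket(PF_UNIX, SOCK_STREAM, 0);
    if (fd == -1) return -1;

    sockaddr_un addr;
    std::memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    std::strcpy(addr.sun_path, tmp_path);
    unlink(tmp_path);                                   // remove a stale socket

    if (bind(fd, (sockaddr*)&addr, sizeof(addr)) == -1 ||
        listen(fd, 5) == -1 ||
        chmod(tmp_path, S_IREAD | S_IWRITE) == -1 ||
        rename(tmp_path, final_path) == -1) {
      close(fd);
      unlink(tmp_path);
      return -1;
    }
    return fd;                                          // now live at final_path
  }
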
--- a/hotspot/src/os/linux/vm/globals_linux.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/linux/vm/globals_linux.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -29,9 +29,10 @@
product(bool, UseOprofile, false, \
"enable support for Oprofile profiler") \
\
- product(bool, UseLinuxPosixThreadCPUClocks, false, \
- "enable fast Linux Posix clocks where available") \
-
+ product(bool, UseLinuxPosixThreadCPUClocks, true, \
+ "enable fast Linux Posix clocks where available")
+// NB: The default value of UseLinuxPosixThreadCPUClocks may be
+// overridden in Arguments::parse_each_vm_init_arg.
//
// Defines Linux-specific default values. The flags are available on all
--- a/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -30,6 +30,8 @@
// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
+# include <sys/stat.h>
+# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
@@ -188,6 +190,10 @@
static char cpu_arch[] = "i386";
#elif defined(AMD64)
static char cpu_arch[] = "amd64";
+#elif defined(ARM)
+static char cpu_arch[] = "arm";
+#elif defined(PPC)
+static char cpu_arch[] = "ppc";
#elif defined(SPARC)
# ifdef _LP64
static char cpu_arch[] = "sparcv9";
@@ -1137,8 +1143,8 @@
long it_real;
uintptr_t start;
uintptr_t vsize;
- uintptr_t rss;
- unsigned long rsslim;
+ intptr_t rss;
+ uintptr_t rsslim;
uintptr_t scodes;
uintptr_t ecode;
int i;
@@ -1168,12 +1174,12 @@
// Skip blank chars
do s++; while (isspace(*s));
- /* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 */
- /* 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 */
- i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld "
- UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT
- " %lu "
- UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT,
+#define _UFM UINTX_FORMAT
+#define _DFM INTX_FORMAT
+
+ /* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 */
+ /* 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 */
+ i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
&state, /* 3 %c */
&ppid, /* 4 %d */
&pgrp, /* 5 %d */
@@ -1193,15 +1199,18 @@
&nice, /* 19 %ld */
&junk, /* 20 %ld */
&it_real, /* 21 %ld */
- &start, /* 22 UINTX_FORMAT */
- &vsize, /* 23 UINTX_FORMAT */
- &rss, /* 24 UINTX_FORMAT */
- &rsslim, /* 25 %lu */
- &scodes, /* 26 UINTX_FORMAT */
- &ecode, /* 27 UINTX_FORMAT */
- &stack_start); /* 28 UINTX_FORMAT */
+ &start, /* 22 UINTX_FORMAT */
+ &vsize, /* 23 UINTX_FORMAT */
+ &rss, /* 24 INTX_FORMAT */
+ &rsslim, /* 25 UINTX_FORMAT */
+ &scodes, /* 26 UINTX_FORMAT */
+ &ecode, /* 27 UINTX_FORMAT */
+ &stack_start); /* 28 UINTX_FORMAT */
}
+#undef _UFM
+#undef _DFM
+
if (i != 28 - 2) {
assert(false, "Bad conversion from /proc/self/stat");
// product mode - assume we are the initial thread, good luck in the
@@ -1336,14 +1345,16 @@
#if defined(IA32) || defined(AMD64)
#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229)
+#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#else
-#error Value of SYS_clock_getres not known on this platform
+#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
+#define sys_clock_getres(x,y) -1
#endif
+#else
+#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#endif
-#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
-
void os::Linux::fast_thread_clock_init() {
if (!UseLinuxPosixThreadCPUClocks) {
return;
@@ -1905,7 +1916,9 @@
!_print_ascii_file("/etc/SuSE-release", st) &&
!_print_ascii_file("/etc/turbolinux-release", st) &&
!_print_ascii_file("/etc/gentoo-release", st) &&
- !_print_ascii_file("/etc/debian_version", st)) {
+ !_print_ascii_file("/etc/debian_version", st) &&
+ !_print_ascii_file("/etc/ltib-release", st) &&
+ !_print_ascii_file("/etc/angstrom-version", st)) {
st->print("Linux");
}
st->cr();
@@ -1971,6 +1984,11 @@
os::loadavg(loadavg, 3);
st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
st->cr();
+
+ // meminfo
+ st->print("\n/proc/meminfo:\n");
+ _print_ascii_file("/proc/meminfo", st);
+ st->cr();
}
void os::print_memory_info(outputStream* st) {
@@ -2097,7 +2115,8 @@
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), NULL);
assert(ret != 0, "cannot locate libjvm");
- if (realpath(dli_fname, buf) == NULL)
+ char *rp = realpath(dli_fname, buf);
+ if (rp == NULL)
return;
if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
@@ -2125,7 +2144,8 @@
assert(strstr(p, "/libjvm") == p, "invalid library name");
p = strstr(p, "_g") ? "_g" : "";
- if (realpath(java_home_var, buf) == NULL)
+ rp = realpath(java_home_var, buf);
+ if (rp == NULL)
return;
// determine if this is a legacy image or modules image
@@ -2147,7 +2167,8 @@
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
} else {
// Go back to path of .so
- if (realpath(dli_fname, buf) == NULL)
+ rp = realpath(dli_fname, buf);
+ if (rp == NULL)
return;
}
}
@@ -2508,9 +2529,9 @@
unsigned long* os::Linux::_numa_all_nodes;
bool os::uncommit_memory(char* addr, size_t size) {
- return ::mmap(addr, size, PROT_NONE,
- MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0)
- != MAP_FAILED;
+ uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
+ MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
+ return res != (uintptr_t) MAP_FAILED;
}
// Linux uses a growable mapping for the stack, and if the mapping for
@@ -2718,7 +2739,8 @@
// the processor.
#ifndef ZERO
- _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M);
+ _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+ ARM_ONLY(2 * M) PPC_ONLY(4 * M);
#endif // ZERO
FILE *fp = fopen("/proc/meminfo", "r");
@@ -3981,6 +4003,9 @@
return JNI_OK;
}
+// this is called at the end of vm_initialization
+void os::init_3(void) { }
+
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
if( !guard_memory((char*)_polling_page, Linux::page_size()) )
@@ -4061,7 +4086,6 @@
////////////////////////////////////////////////////////////////////////////////
// debug support
-#ifndef PRODUCT
static address same_page(address x, address y) {
int page_bits = -os::vm_page_size();
if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
@@ -4072,26 +4096,26 @@
return (address)(intptr_t(y) & page_bits);
}
-bool os::find(address addr) {
+bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
- tty->print(PTR_FORMAT ": ", addr);
+ st->print(PTR_FORMAT ": ", addr);
if (dlinfo.dli_sname != NULL) {
- tty->print("%s+%#x", dlinfo.dli_sname,
+ st->print("%s+%#x", dlinfo.dli_sname,
addr - (intptr_t)dlinfo.dli_saddr);
} else if (dlinfo.dli_fname) {
- tty->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
+ st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
} else {
- tty->print("<absolute address>");
+ st->print("<absolute address>");
}
if (dlinfo.dli_fname) {
- tty->print(" in %s", dlinfo.dli_fname);
+ st->print(" in %s", dlinfo.dli_fname);
}
if (dlinfo.dli_fbase) {
- tty->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
+ st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
}
- tty->cr();
+ st->cr();
if (Verbose) {
// decode some bytes around the PC
@@ -4104,15 +4128,13 @@
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
- Disassembler::decode(begin, end);
+ Disassembler::decode(begin, end, st);
}
return true;
}
return false;
}
-#endif
-
////////////////////////////////////////////////////////////////////////////////
// misc
@@ -4321,6 +4343,7 @@
int count;
long sys_time, user_time;
char string[64];
+ char cdummy;
int idummy;
long ldummy;
FILE *fp;
@@ -4381,11 +4404,11 @@
// Skip blank chars
do s++; while (isspace(*s));
- count = sscanf(s,"%*c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
- &idummy, &idummy, &idummy, &idummy, &idummy,
+ count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
+ &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
&user_time, &sys_time);
- if ( count != 12 ) return -1;
+ if ( count != 13 ) return -1;
if (user_sys_cpu_time) {
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
} else {
@@ -4980,3 +5003,43 @@
}
}
}
+
+// is_headless_jre()
+//
+// Test for the existence of libmawt in motif21 or xawt directories
+// in order to report if we are running in a headless jre
+//
+bool os::is_headless_jre() {
+ struct stat statbuf;
+ char buf[MAXPATHLEN];
+ char libmawtpath[MAXPATHLEN];
+ const char *xawtstr = "/xawt/libmawt.so";
+ const char *motifstr = "/motif21/libmawt.so";
+ char *p;
+
+ // Get path to libjvm.so
+ os::jvm_path(buf, sizeof(buf));
+
+ // Get rid of libjvm.so
+ p = strrchr(buf, '/');
+ if (p == NULL) return false;
+ else *p = '\0';
+
+ // Get rid of client or server
+ p = strrchr(buf, '/');
+ if (p == NULL) return false;
+ else *p = '\0';
+
+ // check xawt/libmawt.so
+ strcpy(libmawtpath, buf);
+ strcat(libmawtpath, xawtstr);
+ if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+ // check motif21/libmawt.so
+ strcpy(libmawtpath, buf);
+ strcat(libmawtpath, motifstr);
+ if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+ return true;
+}
+
--- a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -364,6 +364,7 @@
// Create the door
int SolarisAttachListener::create_door() {
char door_path[PATH_MAX+1];
+ char initial_path[PATH_MAX+1];
int fd, res;
// register exit function
@@ -375,36 +376,46 @@
return -1;
}
+ // create initial file to attach door descriptor
snprintf(door_path, sizeof(door_path), "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
- RESTARTABLE(::creat(door_path, S_IRUSR | S_IWUSR), fd);
-
+ snprintf(initial_path, sizeof(initial_path), "%s.tmp", door_path);
+ RESTARTABLE(::creat(initial_path, S_IRUSR | S_IWUSR), fd);
if (fd == -1) {
- debug_only(warning("attempt to create %s failed", door_path));
+ debug_only(warning("attempt to create %s failed", initial_path));
+ ::door_revoke(dd);
return -1;
}
assert(fd >= 0, "bad file descriptor");
- set_door_path(door_path);
RESTARTABLE(::close(fd), res);
// attach the door descriptor to the file
- if ((res = ::fattach(dd, door_path)) == -1) {
+ if ((res = ::fattach(dd, initial_path)) == -1) {
// if busy then detach and try again
if (errno == EBUSY) {
- ::fdetach(door_path);
- res = ::fattach(dd, door_path);
+ ::fdetach(initial_path);
+ res = ::fattach(dd, initial_path);
}
if (res == -1) {
::door_revoke(dd);
dd = -1;
}
}
+
+ // rename file so that clients can attach
+ if (dd >= 0) {
+ if (::rename(initial_path, door_path) == -1) {
+ RESTARTABLE(::close(dd), res);
+ ::fdetach(initial_path);
+ dd = -1;
+ }
+ }
if (dd >= 0) {
set_door_descriptor(dd);
+ set_door_path(door_path);
} else {
- // unable to create door or attach it to the file
- ::unlink(door_path);
- set_door_path(NULL);
+ // unable to create door, attach it to file, or rename file into place
+ ::unlink(initial_path);
return -1;
}
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1839,8 +1839,8 @@
// Quietly truncate on buffer overflow. Should be an error.
if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
- *buffer = '\0';
- return;
+ *buffer = '\0';
+ return;
}
if (pnamelen == 0) {
@@ -2051,7 +2051,8 @@
{EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
{EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
{EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
- {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}
+ {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+ {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
};
#if (defined IA32)
@@ -2068,9 +2069,11 @@
static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
static Elf32_Half running_arch_code=EM_PPC;
+ #elif (defined ARM)
+ static Elf32_Half running_arch_code=EM_ARM;
#else
#error Method os::dll_load requires that one of following is defined:\
- IA32, AMD64, IA64, __sparc, __powerpc__
+ IA32, AMD64, IA64, __sparc, __powerpc__, ARM
#endif
// Identify compatability class for VM's architecture and library's architecture
@@ -3149,7 +3152,8 @@
// ISM is only recommended on old Solaris where there is no MPSS support.
// Simply choose a conservative value as default.
*page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
- SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M);
+ SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
+ ARM_ONLY(2 * M);
// ISM is available on all supported Solaris versions
return true;
@@ -5007,6 +5011,9 @@
return JNI_OK;
}
+void os::init_3(void) {
+ return;
+}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
@@ -5412,7 +5419,6 @@
}
//---------------------------------------------------------------------------------
-#ifndef PRODUCT
static address same_page(address x, address y) {
intptr_t page_bits = -os::vm_page_size();
@@ -5424,28 +5430,28 @@
return (address)(intptr_t(y) & page_bits);
}
-bool os::find(address addr) {
+bool os::find(address addr, outputStream* st) {
Dl_info dlinfo;
memset(&dlinfo, 0, sizeof(dlinfo));
if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
- tty->print("0x%016lx: ", addr);
+ st->print("0x%016lx: ", addr);
#else
- tty->print("0x%08x: ", addr);
+ st->print("0x%08x: ", addr);
#endif
if (dlinfo.dli_sname != NULL)
- tty->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
+ st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
else if (dlinfo.dli_fname)
- tty->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
+ st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
else
- tty->print("<absolute address>");
- if (dlinfo.dli_fname) tty->print(" in %s", dlinfo.dli_fname);
+ st->print("<absolute address>");
+ if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
- if (dlinfo.dli_fbase) tty->print(" at 0x%016lx", dlinfo.dli_fbase);
+ if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
- if (dlinfo.dli_fbase) tty->print(" at 0x%08x", dlinfo.dli_fbase);
+ if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
- tty->cr();
+ st->cr();
if (Verbose) {
// decode some bytes around the PC
@@ -5458,16 +5464,13 @@
if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
end = (address) dlinfo2.dli_saddr;
- Disassembler::decode(begin, end);
+ Disassembler::decode(begin, end, st);
}
return true;
}
return false;
}
-#endif
-
-
// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0. These came from
// src/solaris/hpi/native_threads in the EVM codebase.
@@ -5910,7 +5913,6 @@
if (jt->handle_special_suspend_equivalent_condition()) {
jt->java_suspend_self();
}
-
OrderAccess::fence();
}
@@ -5997,3 +5999,44 @@
}
}
}
+
+// is_headless_jre()
+//
+// Test for the existence of libmawt in motif21 or xawt directories
+// in order to report if we are running in a headless jre
+//
+bool os::is_headless_jre() {
+ struct stat statbuf;
+ char buf[MAXPATHLEN];
+ char libmawtpath[MAXPATHLEN];
+ const char *xawtstr = "/xawt/libmawt.so";
+ const char *motifstr = "/motif21/libmawt.so";
+ char *p;
+
+ // Get path to libjvm.so
+ os::jvm_path(buf, sizeof(buf));
+
+ // Get rid of libjvm.so
+ p = strrchr(buf, '/');
+ if (p == NULL) return false;
+ else *p = '\0';
+
+ // Get rid of client or server
+ p = strrchr(buf, '/');
+ if (p == NULL) return false;
+ else *p = '\0';
+
+ // check xawt/libmawt.so
+ strcpy(libmawtpath, buf);
+ strcat(libmawtpath, xawtstr);
+ if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+ // check motif21/libmawt.so
+ strcpy(libmawtpath, buf);
+ strcat(libmawtpath, motifstr);
+ if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+ return true;
+}
+
+
--- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -3444,6 +3444,9 @@
return JNI_OK;
}
+void os::init_3(void) {
+ return;
+}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
@@ -4105,12 +4108,10 @@
}
-#ifndef PRODUCT
-bool os::find(address addr) {
+bool os::find(address addr, outputStream* st) {
// Nothing yet
return false;
}
-#endif
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
DWORD exception_code = e->ExceptionRecord->ExceptionCode;
@@ -4164,3 +4165,8 @@
}
return 0;
}
+
+
+// We don't build a headless jre for Windows
+bool os::is_headless_jre() { return false; }
+
--- a/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -105,3 +105,6 @@
// nothing else to try
return false;
}
+
+void JavaThread::cache_global_variables() { }
+
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -718,6 +718,11 @@
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Registers:");
+
+ // this is horrendously verbose but the layout of the registers in the
+ // context does not match how we defined our abstract Register set, so
+ // we can't just iterate through the gregs area
+
#ifdef AMD64
st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
@@ -745,6 +750,63 @@
st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
st->cr();
st->print(" TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
+ st->cr();
+ st->print_cr("RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
+ st->cr();
+ st->print_cr("RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
+ st->cr();
+ st->print_cr("RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
+ st->cr();
+ st->print_cr("RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
+ st->cr();
+ st->print_cr("RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
+ st->cr();
+ st->print_cr("RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
+ st->cr();
+ st->print_cr("RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
+ st->cr();
+ st->print_cr("R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R8]);
+ st->cr();
+ st->print_cr("R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R9]);
+ st->cr();
+ st->print_cr("R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R10]);
+ st->cr();
+ st->print_cr("R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R11]);
+ st->cr();
+ st->print_cr("R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R12]);
+ st->cr();
+ st->print_cr("R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R13]);
+ st->cr();
+ st->print_cr("R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R14]);
+ st->cr();
+ st->print_cr("R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R15]);
+
#else
st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
@@ -759,6 +821,39 @@
st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
+ st->cr();
+ st->print_cr("EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
+ st->cr();
+ st->print_cr("ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
+ st->cr();
+ st->print_cr("EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
+ st->cr();
+ st->print_cr("ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESP]);
+ print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
+ st->cr();
+ st->print_cr("EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
+ print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
+ st->cr();
+ st->print_cr("ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
+ print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
+ st->cr();
+ st->print_cr("EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
+ print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
+
#endif // AMD64
st->cr();
st->cr();
--- a/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -79,3 +79,6 @@
// nothing else to try
return false;
}
+
+void JavaThread::cache_global_variables() { }
+
--- a/hotspot/src/os_cpu/linux_zero/vm/thread_linux_zero.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/linux_zero/vm/thread_linux_zero.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -24,3 +24,5 @@
*/
// This file is intentionally empty
+
+void JavaThread::cache_global_variables() { }
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -587,6 +587,61 @@
st->print_cr(" PC=" INTPTR_FORMAT " nPC=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_PC],
uc->uc_mcontext.gregs[REG_nPC]);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("O0=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O0]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O0]);
+ st->cr();
+ st->print_cr("O1=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O1]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O1]);
+ st->cr();
+ st->print_cr("O2=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O2]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O2]);
+ st->cr();
+ st->print_cr("O3=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O3]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O3]);
+ st->cr();
+ st->print_cr("O4=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O4]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O4]);
+ st->cr();
+ st->print_cr("O5=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O5]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O5]);
+ st->cr();
+ st->print_cr("O6=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O6]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O6]);
+ st->cr();
+ st->print_cr("O7=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O7]);
+ print_location(st, uc->uc_mcontext.gregs[REG_O7]);
+ st->cr();
+
+ st->print_cr("G1=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G1]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G1]);
+ st->cr();
+ st->print_cr("G2=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G2]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G2]);
+ st->cr();
+ st->print_cr("G3=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G3]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G3]);
+ st->cr();
+ st->print_cr("G4=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G4]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G4]);
+ st->cr();
+ st->print_cr("G5=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G5]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G5]);
+ st->cr();
+ st->print_cr("G6=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G6]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G6]);
+ st->cr();
+ st->print_cr("G7=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G7]);
+ print_location(st, uc->uc_mcontext.gregs[REG_G7]);
+
st->cr();
st->cr();
--- a/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -140,3 +140,6 @@
*fr_addr = ret_frame;
return true;
}
+
+void JavaThread::cache_global_variables() { }
+
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -719,6 +719,11 @@
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Registers:");
+
+ // this is horrendously verbose but the layout of the registers in the
+ // context does not match how we defined our abstract Register set, so
+ // we can't just iterate through the gregs area
+
#ifdef AMD64
st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
@@ -742,6 +747,63 @@
st->cr();
st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
+ st->cr();
+ st->print_cr("RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
+ st->cr();
+ st->print_cr("RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
+ st->cr();
+ st->print_cr("RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
+ st->cr();
+ st->print_cr("RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
+ st->cr();
+ st->print_cr("RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
+ st->cr();
+ st->print_cr("RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
+ st->cr();
+ st->print_cr("RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
+ print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
+ st->cr();
+ st->print_cr("R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R8]);
+ st->cr();
+ st->print_cr("R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R9]);
+ st->cr();
+ st->print_cr("R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R10]);
+ st->cr();
+ st->print_cr("R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R11]);
+ st->cr();
+ st->print_cr("R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R12]);
+ st->cr();
+ st->print_cr("R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R13]);
+ st->cr();
+ st->print_cr("R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R14]);
+ st->cr();
+ st->print_cr("R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
+ print_location(st, uc->uc_mcontext.gregs[REG_R15]);
+
#else
st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
@@ -755,6 +817,39 @@
st->cr();
st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
+ print_location(st, uc->uc_mcontext.gregs[EAX]);
+ st->cr();
+ st->print_cr("EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
+ print_location(st, uc->uc_mcontext.gregs[EBX]);
+ st->cr();
+ st->print_cr("ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
+ print_location(st, uc->uc_mcontext.gregs[ECX]);
+ st->cr();
+ st->print_cr("EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
+ print_location(st, uc->uc_mcontext.gregs[EDX]);
+ st->cr();
+ st->print_cr("ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
+ print_location(st, uc->uc_mcontext.gregs[UESP]);
+ st->cr();
+ st->print_cr("EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
+ print_location(st, uc->uc_mcontext.gregs[EBP]);
+ st->cr();
+ st->print_cr("ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
+ print_location(st, uc->uc_mcontext.gregs[ESI]);
+ st->cr();
+ st->print_cr("EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
+ print_location(st, uc->uc_mcontext.gregs[EDI]);
+
#endif // AMD64
st->cr();
st->cr();
@@ -773,6 +868,7 @@
print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
}
+
#ifdef AMD64
void os::Solaris::init_thread_fpu_state(void) {
// Nothing to do
--- a/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -82,3 +82,6 @@
return true;
}
+
+void JavaThread::cache_global_variables() { }
+
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -377,18 +377,84 @@
st->print_cr("Registers:");
#ifdef AMD64
- st->print( "EAX=" INTPTR_FORMAT, uc->Rax);
- st->print(", EBX=" INTPTR_FORMAT, uc->Rbx);
- st->print(", ECX=" INTPTR_FORMAT, uc->Rcx);
- st->print(", EDX=" INTPTR_FORMAT, uc->Rdx);
+ st->print( "RAX=" INTPTR_FORMAT, uc->Rax);
+ st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
+ st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
+ st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
+ st->cr();
+ st->print( "RSP=" INTPTR_FORMAT, uc->Rsp);
+ st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
+ st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
+ st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
+ st->cr();
+ st->print( "R8=" INTPTR_FORMAT, uc->R8);
+ st->print(", R9=" INTPTR_FORMAT, uc->R9);
+ st->print(", R10=" INTPTR_FORMAT, uc->R10);
+ st->print(", R11=" INTPTR_FORMAT, uc->R11);
+ st->cr();
+ st->print( "R12=" INTPTR_FORMAT, uc->R12);
+ st->print(", R13=" INTPTR_FORMAT, uc->R13);
+ st->print(", R14=" INTPTR_FORMAT, uc->R14);
+ st->print(", R15=" INTPTR_FORMAT, uc->R15);
+ st->cr();
+ st->print( "RIP=" INTPTR_FORMAT, uc->Rip);
+ st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("RAX=" INTPTR_FORMAT, uc->Rax);
+ print_location(st, uc->Rax);
+ st->cr();
+ st->print_cr("RBX=" INTPTR_FORMAT, uc->Rbx);
+ print_location(st, uc->Rbx);
st->cr();
- st->print( "ESP=" INTPTR_FORMAT, uc->Rsp);
- st->print(", EBP=" INTPTR_FORMAT, uc->Rbp);
- st->print(", ESI=" INTPTR_FORMAT, uc->Rsi);
- st->print(", EDI=" INTPTR_FORMAT, uc->Rdi);
+ st->print_cr("RCX=" INTPTR_FORMAT, uc->Rcx);
+ print_location(st, uc->Rcx);
+ st->cr();
+ st->print_cr("RDX=" INTPTR_FORMAT, uc->Rdx);
+ print_location(st, uc->Rdx);
+ st->cr();
+ st->print_cr("RSP=" INTPTR_FORMAT, uc->Rsp);
+ print_location(st, uc->Rsp);
+ st->cr();
+ st->print_cr("RBP=" INTPTR_FORMAT, uc->Rbp);
+ print_location(st, uc->Rbp);
+ st->cr();
+ st->print_cr("RSI=" INTPTR_FORMAT, uc->Rsi);
+ print_location(st, uc->Rsi);
+ st->cr();
+ st->print_cr("RDI=" INTPTR_FORMAT, uc->Rdi);
+ print_location(st, uc->Rdi);
+ st->cr();
+ st->print_cr("R8 =" INTPTR_FORMAT, uc->R8);
+ print_location(st, uc->R8);
st->cr();
- st->print( "EIP=" INTPTR_FORMAT, uc->Rip);
- st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
+ st->print_cr("R9 =" INTPTR_FORMAT, uc->R9);
+ print_location(st, uc->R9);
+ st->cr();
+ st->print_cr("R10=" INTPTR_FORMAT, uc->R10);
+ print_location(st, uc->R10);
+ st->cr();
+ st->print_cr("R11=" INTPTR_FORMAT, uc->R11);
+ print_location(st, uc->R11);
+ st->cr();
+ st->print_cr("R12=" INTPTR_FORMAT, uc->R12);
+ print_location(st, uc->R12);
+ st->cr();
+ st->print_cr("R13=" INTPTR_FORMAT, uc->R13);
+ print_location(st, uc->R13);
+ st->cr();
+ st->print_cr("R14=" INTPTR_FORMAT, uc->R14);
+ print_location(st, uc->R14);
+ st->cr();
+ st->print_cr("R15=" INTPTR_FORMAT, uc->R15);
+ print_location(st, uc->R15);
#else
st->print( "EAX=" INTPTR_FORMAT, uc->Eax);
st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
@@ -402,6 +468,38 @@
st->cr();
st->print( "EIP=" INTPTR_FORMAT, uc->Eip);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
+
+ st->cr();
+ st->cr();
+
+ st->print_cr("Register to memory mapping:");
+ st->cr();
+
+ // this is only for the "general purpose" registers
+
+ st->print_cr("EAX=" INTPTR_FORMAT, uc->Eax);
+ print_location(st, uc->Eax);
+ st->cr();
+ st->print_cr("EBX=" INTPTR_FORMAT, uc->Ebx);
+ print_location(st, uc->Ebx);
+ st->cr();
+ st->print_cr("ECX=" INTPTR_FORMAT, uc->Ecx);
+ print_location(st, uc->Ecx);
+ st->cr();
+ st->print_cr("EDX=" INTPTR_FORMAT, uc->Edx);
+ print_location(st, uc->Edx);
+ st->cr();
+ st->print_cr("ESP=" INTPTR_FORMAT, uc->Esp);
+ print_location(st, uc->Esp);
+ st->cr();
+ st->print_cr("EBP=" INTPTR_FORMAT, uc->Ebp);
+ print_location(st, uc->Ebp);
+ st->cr();
+ st->print_cr("ESI=" INTPTR_FORMAT, uc->Esi);
+ print_location(st, uc->Esi);
+ st->cr();
+ st->print_cr("EDI=" INTPTR_FORMAT, uc->Edi);
+ print_location(st, uc->Edi);
#endif // AMD64
st->cr();
st->cr();
--- a/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -84,3 +84,6 @@
// nothing else to try
return false;
}
+
+void JavaThread::cache_global_variables() { }
+
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -128,7 +128,11 @@
delete _overflow_arena;
#ifdef ASSERT
+ // Save allocation type to execute assert in ~ResourceObj()
+ // which is called after this destructor.
+ ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
+ ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -102,7 +102,7 @@
_locs_point = NULL;
_locs_own = false;
_frozen = false;
- debug_only(_index = -1);
+ debug_only(_index = (char)-1);
debug_only(_outer = (CodeBuffer*)badAddress);
}
@@ -278,7 +278,7 @@
// special case during expansion which is handled internally. This
// is done to guarantee proper cleanup of resources.
void* operator new(size_t size) { return ResourceObj::operator new(size); }
- void operator delete(void* p) { ResourceObj::operator delete(p); }
+ void operator delete(void* p) { ShouldNotCallThis(); }
public:
typedef int csize_t; // code size type; would be size_t except for history
--- a/hotspot/src/share/vm/c1/c1_CodeStubs.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_CodeStubs.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -448,6 +448,10 @@
_obj(obj), _info(info), _stub(stub) {
}
+ void set_obj(LIR_Opr obj) {
+ _obj = obj;
+ }
+
virtual void emit_code(LIR_Assembler* e);
virtual CodeEmitInfo* info() const { return _info; }
virtual bool is_exception_throw_stub() const { return true; }
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -220,11 +220,13 @@
code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
CHECK_BAILOUT();
- // Generate code for MethodHandle deopt handler. We can use the
- // same code as for the normal deopt handler, we just need a
- // different entry point address.
- code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
- CHECK_BAILOUT();
+ // Emit the MethodHandle deopt handler code (if required).
+ if (has_method_handle_invokes()) {
+ // We can use the same code as for the normal deopt handler, we
+ // just need a different entry point address.
+ code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
+ CHECK_BAILOUT();
+ }
// Emit the handler to remove the activation from the stack and
// dispatch to the caller.
@@ -446,6 +448,7 @@
, _has_exception_handlers(false)
, _has_fpu_code(true) // pessimistic assumption
, _has_unsafe_access(false)
+, _has_method_handle_invokes(false)
, _bailout_msg(NULL)
, _exception_info_list(NULL)
, _allocator(NULL)
--- a/hotspot/src/share/vm/c1/c1_Compilation.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Compilation.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -69,6 +69,7 @@
bool _has_exception_handlers;
bool _has_fpu_code;
bool _has_unsafe_access;
+ bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
const char* _bailout_msg;
ExceptionInfoList* _exception_info_list;
ExceptionHandlerTable _exception_handler_table;
@@ -147,6 +148,10 @@
// Statistics gathering
void notice_inlined_method(ciMethod* method);
+ // JSR 292
+ bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
+ void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
+
DebugInformationRecorder* debug_info_recorder() const; // = _env->debug_info();
Dependencies* dependency_recorder() const; // = _env->dependencies()
ImplicitExceptionTable* implicit_exception_table() { return &_implicit_exception_table; }
@@ -168,10 +173,19 @@
const char* bailout_msg() const { return _bailout_msg; }
static int desired_max_code_buffer_size() {
+#ifndef PPC
return (int) NMethodSizeLimit; // default 256K or 512K
+#else
+ // conditional branches on PPC are restricted to 16 bit signed
+ return MAX2((unsigned int)NMethodSizeLimit,32*K);
+#endif
}
static int desired_max_constant_size() {
+#ifndef PPC
return (int) NMethodSizeLimit / 10; // about 25K
+#else
+ return (MAX2((unsigned int)NMethodSizeLimit, 32*K)) / 10;
+#endif
}
static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
--- a/hotspot/src/share/vm/c1/c1_FrameMap.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -90,7 +90,7 @@
if (outgoing) {
// update the space reserved for arguments.
- update_reserved_argument_area_size(out_preserve);
+ update_reserved_argument_area_size(out_preserve * BytesPerWord);
}
return new CallingConvention(args, out_preserve);
}
@@ -138,7 +138,7 @@
}
assert(args->length() == signature->length(), "size mismatch");
out_preserve += SharedRuntime::out_preserve_stack_slots();
- update_reserved_argument_area_size(out_preserve);
+ update_reserved_argument_area_size(out_preserve * BytesPerWord);
return new CallingConvention(args, out_preserve);
}
--- a/hotspot/src/share/vm/c1/c1_FrameMap.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_FrameMap.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -154,7 +154,6 @@
static LIR_Opr method_handle_invoke_SP_save_opr();
static BasicTypeArray* signature_type_array_for(const ciMethod* method);
- static BasicTypeArray* signature_type_array_for(const char * signature);
// for outgoing calls, these also update the reserved area to
// include space for arguments and any ABI area.
--- a/hotspot/src/share/vm/c1/c1_LIR.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIR.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -50,8 +50,7 @@
#endif // X86
-
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
FloatRegister LIR_OprDesc::as_float_reg() const {
return FrameMap::nr2floatreg(fpu_regnr());
@@ -63,6 +62,19 @@
#endif
+#ifdef ARM
+
+FloatRegister LIR_OprDesc::as_float_reg() const {
+ return as_FloatRegister(fpu_regnr());
+}
+
+FloatRegister LIR_OprDesc::as_double_reg() const {
+ return as_FloatRegister(fpu_regnrLo());
+}
+
+#endif
+
+
LIR_Opr LIR_OprFact::illegalOpr = LIR_OprFact::illegal();
LIR_Opr LIR_OprFact::value_type(ValueType* type) {
@@ -119,10 +131,14 @@
#ifndef PRODUCT
void LIR_Address::verify() const {
-#ifdef SPARC
- assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
+#if defined(SPARC) || defined(PPC)
+ assert(scale() == times_1, "Scaled addressing mode not available on SPARC/PPC and should not be used");
assert(disp() == 0 || index()->is_illegal(), "can't have both");
#endif
+#ifdef ARM
+ assert(disp() == 0 || index()->is_illegal(), "can't have both");
+ assert(-4096 < disp() && disp() < 4096, "architecture constraint");
+#endif
#ifdef _LP64
assert(base()->is_cpu_register(), "wrong base operand");
assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
@@ -173,13 +189,22 @@
if (!is_pointer() && !is_illegal()) {
switch (as_BasicType(type_field())) {
case T_LONG:
- assert((kind_field() == cpu_register || kind_field() == stack_value) && size_field() == double_size, "must match");
+ assert((kind_field() == cpu_register || kind_field() == stack_value) &&
+ size_field() == double_size, "must match");
break;
case T_FLOAT:
- assert((kind_field() == fpu_register || kind_field() == stack_value) && size_field() == single_size, "must match");
+ // FP return values can also be in CPU registers on ARM and PPC (softfp ABI)
+ assert((kind_field() == fpu_register || kind_field() == stack_value
+ ARM_ONLY(|| kind_field() == cpu_register)
+ PPC_ONLY(|| kind_field() == cpu_register) ) &&
+ size_field() == single_size, "must match");
break;
case T_DOUBLE:
- assert((kind_field() == fpu_register || kind_field() == stack_value) && size_field() == double_size, "must match");
+ // FP return values can also be in CPU registers on ARM and PPC (softfp ABI)
+ assert((kind_field() == fpu_register || kind_field() == stack_value
+ ARM_ONLY(|| kind_field() == cpu_register)
+ PPC_ONLY(|| kind_field() == cpu_register) ) &&
+ size_field() == double_size, "must match");
break;
case T_BOOLEAN:
case T_CHAR:
@@ -188,7 +213,8 @@
case T_INT:
case T_OBJECT:
case T_ARRAY:
- assert((kind_field() == cpu_register || kind_field() == stack_value) && size_field() == single_size, "must match");
+ assert((kind_field() == cpu_register || kind_field() == stack_value) &&
+ size_field() == single_size, "must match");
break;
case T_ILLEGAL:
@@ -503,6 +529,10 @@
assert(opConvert->_info == NULL, "must be");
if (opConvert->_opr->is_valid()) do_input(opConvert->_opr);
if (opConvert->_result->is_valid()) do_output(opConvert->_result);
+#ifdef PPC
+ if (opConvert->_tmp1->is_valid()) do_temp(opConvert->_tmp1);
+ if (opConvert->_tmp2->is_valid()) do_temp(opConvert->_tmp2);
+#endif
do_stub(opConvert->_stub);
break;
@@ -530,7 +560,9 @@
LIR_OpAllocObj* opAllocObj = (LIR_OpAllocObj*)op;
if (opAllocObj->_info) do_info(opAllocObj->_info);
- if (opAllocObj->_opr->is_valid()) do_input(opAllocObj->_opr);
+ if (opAllocObj->_opr->is_valid()) { do_input(opAllocObj->_opr);
+ do_temp(opAllocObj->_opr);
+ }
if (opAllocObj->_tmp1->is_valid()) do_temp(opAllocObj->_tmp1);
if (opAllocObj->_tmp2->is_valid()) do_temp(opAllocObj->_tmp2);
if (opAllocObj->_tmp3->is_valid()) do_temp(opAllocObj->_tmp3);
@@ -826,10 +858,16 @@
assert(op->as_OpCompareAndSwap() != NULL, "must be");
LIR_OpCompareAndSwap* opCompareAndSwap = (LIR_OpCompareAndSwap*)op;
+ assert(opCompareAndSwap->_addr->is_valid(), "used");
+ assert(opCompareAndSwap->_cmp_value->is_valid(), "used");
+ assert(opCompareAndSwap->_new_value->is_valid(), "used");
if (opCompareAndSwap->_info) do_info(opCompareAndSwap->_info);
- if (opCompareAndSwap->_addr->is_valid()) do_input(opCompareAndSwap->_addr);
- if (opCompareAndSwap->_cmp_value->is_valid()) do_input(opCompareAndSwap->_cmp_value);
- if (opCompareAndSwap->_new_value->is_valid()) do_input(opCompareAndSwap->_new_value);
+ do_input(opCompareAndSwap->_addr);
+ do_temp(opCompareAndSwap->_addr);
+ do_input(opCompareAndSwap->_cmp_value);
+ do_temp(opCompareAndSwap->_cmp_value);
+ do_input(opCompareAndSwap->_new_value);
+ do_temp(opCompareAndSwap->_new_value);
if (opCompareAndSwap->_tmp1->is_valid()) do_temp(opCompareAndSwap->_tmp1);
if (opCompareAndSwap->_tmp2->is_valid()) do_temp(opCompareAndSwap->_tmp2);
if (opCompareAndSwap->_result->is_valid()) do_output(opCompareAndSwap->_result);
@@ -1303,13 +1341,13 @@
info));
}
-void LIR_List::unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, CodeStub* stub) {
+void LIR_List::unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub) {
append(new LIR_OpLock(
lir_unlock,
hdr,
obj,
lock,
- LIR_OprFact::illegalOpr,
+ scratch,
stub,
NULL));
}
@@ -1342,22 +1380,19 @@
}
-void LIR_List::cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2) {
- // Compare and swap produces condition code "zero" if contents_of(addr) == cmp_value,
- // implying successful swap of new_value into addr
- append(new LIR_OpCompareAndSwap(lir_cas_long, addr, cmp_value, new_value, t1, t2));
+void LIR_List::cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result) {
+ append(new LIR_OpCompareAndSwap(lir_cas_long, addr, cmp_value, new_value, t1, t2, result));
}
-void LIR_List::cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2) {
- // Compare and swap produces condition code "zero" if contents_of(addr) == cmp_value,
- // implying successful swap of new_value into addr
- append(new LIR_OpCompareAndSwap(lir_cas_obj, addr, cmp_value, new_value, t1, t2));
+void LIR_List::cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result) {
+ append(new LIR_OpCompareAndSwap(lir_cas_obj, addr, cmp_value, new_value, t1, t2, result));
}
-void LIR_List::cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2) {
- // Compare and swap produces condition code "zero" if contents_of(addr) == cmp_value,
- // implying successful swap of new_value into addr
- append(new LIR_OpCompareAndSwap(lir_cas_int, addr, cmp_value, new_value, t1, t2));
+void LIR_List::cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result) {
+ append(new LIR_OpCompareAndSwap(lir_cas_int, addr, cmp_value, new_value, t1, t2, result));
}
@@ -1400,6 +1435,11 @@
out->print("fpu%d", fpu_regnr());
} else if (is_double_fpu()) {
out->print("fpu%d", fpu_regnrLo());
+#elif defined(ARM)
+ } else if (is_single_fpu()) {
+ out->print("s%d", fpu_regnr());
+ } else if (is_double_fpu()) {
+ out->print("d%d", fpu_regnrLo() >> 1);
#else
} else if (is_single_fpu()) {
out->print(as_float_reg()->name());
@@ -1756,6 +1796,12 @@
print_bytecode(out, bytecode());
in_opr()->print(out); out->print(" ");
result_opr()->print(out); out->print(" ");
+#ifdef PPC
+ if(tmp1()->is_valid()) {
+ tmp1()->print(out); out->print(" ");
+ tmp2()->print(out); out->print(" ");
+ }
+#endif
}
void LIR_OpConvert::print_bytecode(outputStream* out, Bytecodes::Code code) {
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -432,8 +432,7 @@
// for compatibility with RInfo
int fpu () const { return lo_reg_half(); }
#endif // X86
-
-#ifdef SPARC
+#if defined(SPARC) || defined(ARM) || defined(PPC)
FloatRegister as_float_reg () const;
FloatRegister as_double_reg () const;
#endif
@@ -519,14 +518,14 @@
, _type(type)
, _disp(0) { verify(); }
-#ifdef X86
+#if defined(X86) || defined(ARM)
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
_base(base)
, _index(index)
, _scale(scale)
, _type(type)
, _disp(disp) { verify(); }
-#endif // X86
+#endif // X86 || ARM
LIR_Opr base() const { return _base; }
LIR_Opr index() const { return _index; }
@@ -566,7 +565,11 @@
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size); }
-
+#if defined(ARM)
+ static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
+ static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+ static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
+#endif
#ifdef SPARC
static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
(reg2 << LIR_OprDesc::reg2_shift) |
@@ -593,7 +596,22 @@
LIR_OprDesc::double_size |
LIR_OprDesc::is_xmm_mask); }
#endif // X86
-
+#ifdef PPC
+ static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+ (reg << LIR_OprDesc::reg2_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::double_size); }
+ static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) |
+ LIR_OprDesc::float_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::single_size); }
+ static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift) |
+ (reg1 << LIR_OprDesc::reg2_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::double_size); }
+#endif // PPC
static LIR_Opr virtual_register(int index, BasicType type) {
LIR_Opr res;
@@ -623,6 +641,22 @@
LIR_OprDesc::virtual_mask);
break;
+#ifdef __SOFTFP__
+ case T_FLOAT:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::float_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::single_size |
+ LIR_OprDesc::virtual_mask);
+ break;
+ case T_DOUBLE:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::double_size |
+ LIR_OprDesc::virtual_mask);
+ break;
+#else // __SOFTFP__
case T_FLOAT:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::float_type |
@@ -638,7 +672,7 @@
LIR_OprDesc::double_size |
LIR_OprDesc::virtual_mask);
break;
-
+#endif // __SOFTFP__
default: ShouldNotReachHere(); res = illegalOpr;
}
@@ -650,11 +684,18 @@
// old-style calculation; check if old and new method are equal
LIR_OprDesc::OprType t = as_OprType(type);
+#ifdef __SOFTFP__
+ LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ t |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
+#else // __SOFTFP__
LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
assert(res == old_res, "old and new method not equal");
-#endif
+#endif // __SOFTFP__
+#endif // ASSERT
return res;
}
@@ -1306,15 +1347,37 @@
private:
Bytecodes::Code _bytecode;
ConversionStub* _stub;
+#ifdef PPC
+ LIR_Opr _tmp1;
+ LIR_Opr _tmp2;
+#endif
public:
LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
: LIR_Op1(lir_convert, opr, result)
, _stub(stub)
+#ifdef PPC
+ , _tmp1(LIR_OprDesc::illegalOpr())
+ , _tmp2(LIR_OprDesc::illegalOpr())
+#endif
, _bytecode(code) {}
+#ifdef PPC
+ LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
+ ,LIR_Opr tmp1, LIR_Opr tmp2)
+ : LIR_Op1(lir_convert, opr, result)
+ , _stub(stub)
+ , _tmp1(tmp1)
+ , _tmp2(tmp2)
+ , _bytecode(code) {}
+#endif
+
Bytecodes::Code bytecode() const { return _bytecode; }
ConversionStub* stub() const { return _stub; }
+#ifdef PPC
+ LIR_Opr tmp1() const { return _tmp1; }
+ LIR_Opr tmp2() const { return _tmp2; }
+#endif
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpConvert* as_OpConvert() { return this; }
@@ -1502,6 +1565,9 @@
LIR_Condition condition() const {
assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
}
+ void set_condition(LIR_Condition condition) {
+ assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition;
+ }
void set_fpu_stack_size(int size) { _fpu_stack_size = size; }
int fpu_stack_size() const { return _fpu_stack_size; }
@@ -1650,8 +1716,9 @@
LIR_Opr _tmp2;
public:
- LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2)
- : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) // no result, no info
+ LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
+ : LIR_Op(code, result, NULL) // no info
, _addr(addr)
, _cmp_value(cmp_value)
, _new_value(new_value)
@@ -1832,6 +1899,9 @@
void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); }
+#ifdef PPC
+ void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
+#endif
void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and, left, right, dst)); }
@@ -1867,9 +1937,12 @@
append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
}
- void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
- void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
- void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
+ void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
+ void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
+ void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
+ LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_abs , from, tmp, to)); }
void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
@@ -1950,7 +2023,7 @@
}
void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
- void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, CodeStub* stub);
+ void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
void set_24bit_fpu() { append(new LIR_Op0(lir_24bit_FPU )); }
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -438,6 +438,12 @@
default: ShouldNotReachHere();
}
+ // JSR 292
+ // Record if this method has MethodHandle invokes.
+ if (op->is_method_handle_invoke()) {
+ compilation()->set_has_method_handle_invokes(true);
+ }
+
#if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it
if (UseSSE < 2) {
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -31,6 +31,12 @@
#define __ gen()->lir()->
#endif
+// TODO: ARM - Use some recognizable constant which still fits architectural constraints
+#ifdef ARM
+#define PATCHED_ADDR (204)
+#else
+#define PATCHED_ADDR (max_jint)
+#endif
void PhiResolverState::reset(int max_vregs) {
// Initialize array sizes
@@ -225,13 +231,13 @@
void LIRItem::load_item_force(LIR_Opr reg) {
LIR_Opr r = result();
if (r != reg) {
+#if !defined(ARM) && !defined(E500V2)
if (r->type() != reg->type()) {
// moves between different types need an intervening spill slot
- LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
- __ move(tmp, reg);
- } else {
- __ move(r, reg);
+ r = _gen->force_to_spill(r, reg->type());
}
+#endif
+ __ move(r, reg);
_result = reg;
}
}
@@ -628,14 +634,14 @@
}
-void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
+void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
if (!GenerateSynchronizationCode) return;
// setup registers
LIR_Opr hdr = lock;
lock = new_hdr;
CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
__ load_stack_address_monitor(monitor_no, lock);
- __ unlock_object(hdr, object, lock, slow_path);
+ __ unlock_object(hdr, object, lock, scratch, slow_path);
}
@@ -1400,6 +1406,25 @@
}
assert(addr->is_register(), "must be a register at this point");
+#ifdef ARM
+ // TODO: ARM - move to platform-dependent code
+ LIR_Opr tmp = FrameMap::R14_opr;
+ if (VM_Version::supports_movw()) {
+ __ move((LIR_Opr)card_table_base, tmp);
+ } else {
+ __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
+ }
+
+ CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
+ LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
+ if(((int)ct->byte_map_base & 0xff) == 0) {
+ __ move(tmp, card_addr);
+ } else {
+ LIR_Opr tmp_zero = new_register(T_INT);
+ __ move(LIR_OprFact::intConst(0), tmp_zero);
+ __ move(tmp_zero, card_addr);
+ }
+#else // ARM
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
__ move(addr, tmp);
@@ -1415,6 +1440,7 @@
new LIR_Address(tmp, load_constant(card_table_base),
T_BYTE));
}
+#endif // ARM
}
@@ -1507,7 +1533,7 @@
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
- address = new LIR_Address(object.result(), max_jint, field_type);
+ address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
} else {
address = generate_address(object.result(), x->offset(), field_type);
}
@@ -1584,7 +1610,7 @@
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
- address = new LIR_Address(object.result(), max_jint, field_type);
+ address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
} else {
address = generate_address(object.result(), x->offset(), field_type);
}
@@ -1844,6 +1870,8 @@
}
#endif
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
+#elif defined(ARM)
+ addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
#else
if (index_op->is_illegal() || log2_scale == 0) {
#ifdef _LP64
@@ -1916,6 +1944,7 @@
__ convert(Bytecodes::_i2l, idx.result(), index_op);
} else {
#endif
+ // TODO: ARM also allows embedded shift in the address
__ move(idx.result(), index_op);
#ifdef _LP64
}
@@ -2204,7 +2233,10 @@
// Assign new location to Local instruction for this local
Local* local = x->state()->local_at(java_index)->as_Local();
assert(local != NULL, "Locals for incoming arguments must have been created");
+#ifndef __SOFTFP__
+ // The java calling convention passes double as long and float as int.
assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
+#endif // __SOFTFP__
local->set_operand(dest);
_instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
java_index += type2size[t];
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -314,7 +314,7 @@
void logic_op (Bytecodes::Code code, LIR_Opr dst_reg, LIR_Opr left, LIR_Opr right);
void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info);
- void monitor_exit (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, int monitor_no);
+ void monitor_exit (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no);
void new_instance (LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info);
@@ -338,6 +338,9 @@
}
LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark);
+ // the helper for generate_address
+ void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
+
// machine preferences and characteristics
bool can_inline_as_constant(Value i) const;
bool can_inline_as_constant(LIR_Const* c) const;
@@ -393,6 +396,10 @@
return l;
}
+#ifdef __SOFTFP__
+ void do_soft_float_compare(If *x);
+#endif // __SOFTFP__
+
void init();
SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
@@ -444,6 +451,7 @@
static LIR_Opr remOutOpr();
static LIR_Opr shiftCountOpr();
LIR_Opr syncTempOpr();
+ LIR_Opr atomicLockOpr();
// returns a register suitable for saving the thread in a
// call_runtime_leaf if one is needed.
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -169,7 +169,11 @@
}
bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
+#if defined(__SOFTFP__) || defined(E500V2)
+ return i->reg_num() >= LIR_OprDesc::vreg_base;
+#else
return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
+#endif // __SOFTFP__ or E500V2
}
bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
@@ -177,7 +181,11 @@
}
bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
+#if defined(__SOFTFP__) || defined(E500V2)
+ return false;
+#else
return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
+#endif // __SOFTFP__ or E500V2
}
bool LinearScan::is_in_fpu_register(const Interval* i) {
@@ -2010,12 +2018,18 @@
return LIR_OprFact::single_cpu_oop(assigned_reg);
}
+#ifdef __SOFTFP__
+ case T_FLOAT: // fall through
+#endif // __SOFTFP__
case T_INT: {
assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register");
return LIR_OprFact::single_cpu(assigned_reg);
}
+#ifdef __SOFTFP__
+ case T_DOUBLE: // fall through
+#endif // __SOFTFP__
case T_LONG: {
int assigned_regHi = interval->assigned_regHi();
assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
@@ -2033,7 +2047,7 @@
#ifdef _LP64
return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
#else
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
#else
return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
@@ -2041,6 +2055,7 @@
#endif // LP64
}
+#ifndef __SOFTFP__
case T_FLOAT: {
#ifdef X86
if (UseSSE >= 1) {
@@ -2069,6 +2084,11 @@
assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
+#elif defined(ARM)
+ assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
+ assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
+ assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
+ LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
#else
assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
@@ -2076,6 +2096,7 @@
#endif
return result;
}
+#endif // __SOFTFP__
default: {
ShouldNotReachHere();
@@ -2638,6 +2659,12 @@
#ifdef SPARC
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
#endif
+#ifdef ARM
+ assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
+#endif
+#ifdef PPC
+ assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
+#endif
VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
#ifdef _LP64
@@ -6135,6 +6162,17 @@
assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
+ LIR_Op2* prev_cmp = NULL;
+
+ for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
+ prev_op = instructions->at(j);
+ if(prev_op->code() == lir_cmp) {
+ assert(prev_op->as_Op2() != NULL, "cmp must be of type LIR_Op2");
+ prev_cmp = (LIR_Op2*)prev_op;
+ assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
+ }
+ }
+ assert(prev_cmp != NULL, "should have found comp instruction for branch");
if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
@@ -6142,6 +6180,7 @@
// eliminate a conditional branch to the immediate successor
prev_branch->change_block(last_branch->block());
prev_branch->negate_cond();
+ prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1);
}
}
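
For context: the new loop above walks backwards from the trailing conditional branch to the lir_cmp that feeds it, and when the branch is negated it also updates the condition recorded on that compare so the two stay consistent. A simplified, self-contained sketch of that bookkeeping (toy types, not HotSpot's LIR classes):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy stand-ins for lir_cmp / LIR_OpBranch; only the condition bookkeeping
// from the hunk above is modeled.
enum Cond { EQ, NE, LT, GE };
static Cond negate(Cond c) {
  switch (c) {
    case EQ: return NE;
    case NE: return EQ;
    case LT: return GE;
    default: return LT;
  }
}
struct Op { bool is_cmp; bool is_branch; Cond cond; };

// Negate the conditional branch at the end of 'ops' and keep the condition
// recorded on the matching compare in sync with it.
static void negate_branch_and_cmp(std::vector<Op>& ops) {
  assert(!ops.empty() && ops.back().is_branch);
  Op& branch = ops.back();
  Op* cmp = NULL;
  for (int j = (int)ops.size() - 2; j >= 0 && cmp == NULL; j--) {
    if (ops[j].is_cmp) cmp = &ops[j];
  }
  assert(cmp != NULL && cmp->cond == branch.cond);
  branch.cond = negate(branch.cond);
  cmp->cond   = branch.cond;
}

int main() {
  std::vector<Op> ops;
  Op cmp    = { true,  false, EQ };
  Op branch = { false, true,  EQ };
  ops.push_back(cmp);
  ops.push_back(branch);
  negate_branch_and_cmp(ops);
  assert(ops[0].cond == NE && ops[1].cond == NE);
  return 0;
}
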
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -144,7 +144,7 @@
#ifndef TIERED
case counter_overflow_id: // Not generated outside the tiered world
#endif
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
case handle_exception_nofpu_id: // Unused on sparc
#endif
break;
@@ -240,7 +240,8 @@
#undef FUNCTION_CASE
- return "<unknown function>";
+ // Soft float adds more runtime names.
+ return pd_name_for_address(entry);
}
@@ -896,7 +897,10 @@
} else {
// patch the instruction <move reg, klass>
NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
- assert(n_copy->data() == 0, "illegal init value");
+
+ assert(n_copy->data() == 0 ||
+ n_copy->data() == (int)Universe::non_oop_word(),
+ "illegal init value");
assert(load_klass() != NULL, "klass not set");
n_copy->set_data((intx) (load_klass()));
@@ -904,7 +908,7 @@
Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
}
-#ifdef SPARC
+#if defined(SPARC) || defined(PPC)
// Update the oop location in the nmethod with the proper
// oop. When the code was generated, a NULL was stuffed
// in the oop table and that table needs to be update to
@@ -934,6 +938,14 @@
if (do_patch) {
// replace instructions
// first replace the tail, then the call
+#ifdef ARM
+ if(stub_id == Runtime1::load_klass_patching_id && !VM_Version::supports_movw()) {
+ copy_buff -= *byte_count;
+ NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
+ n_copy2->set_data((intx) (load_klass()), instr_pc);
+ }
+#endif
+
for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
address ptr = copy_buff + i;
int a_byte = (*ptr) & 0xFF;
@@ -961,6 +973,12 @@
relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
relocInfo::none, relocInfo::oop_type);
#endif
+#ifdef PPC
+ { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
+ RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
+ relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2, relocInfo::none, relocInfo::oop_type);
+ }
+#endif
}
} else {
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -159,6 +159,9 @@
static const char* name_for (StubID id);
static const char* name_for_address(address entry);
+ // Platform-specific code may add more runtime names.
+ static const char* pd_name_for_address(address entry);
+
// method tracing
static void trace_block_entry(jint block_id);
--- a/hotspot/src/share/vm/ci/ciField.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciField.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -339,7 +339,7 @@
if (_type != NULL) _type->print_name();
else tty->print("(reference)");
tty->print(" is_constant=%s", bool_to_str(_is_constant));
- if (_is_constant) {
+ if (_is_constant && is_static()) {
tty->print(" constant_value=");
_constant_value.print();
}
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -403,8 +403,9 @@
instanceKlass* ik = get_instanceKlass();
int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;
+ Arena* arena = curEnv->arena();
_non_static_fields =
- new (curEnv->arena()) GrowableArray<ciField*>(max_n_fields);
+ new (arena) GrowableArray<ciField*>(arena, max_n_fields, 0, NULL);
NonStaticFieldFiller filler(curEnv, _non_static_fields);
ik->do_nonstatic_fields(&filler);
}
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -55,10 +55,10 @@
_exception_handlers = NULL;
_liveness = NULL;
_method_blocks = NULL;
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(SHARK)
_flow = NULL;
_bcea = NULL;
-#endif // COMPILER2
+#endif // COMPILER2 || SHARK
ciEnv *env = CURRENT_ENV;
if (env->jvmti_can_hotswap_or_post_breakpoint() && _is_compilable) {
@@ -123,10 +123,10 @@
_can_be_statically_bound = false;
_method_blocks = NULL;
_method_data = NULL;
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(SHARK)
_flow = NULL;
_bcea = NULL;
-#endif // COMPILER2
+#endif // COMPILER2 || SHARK
}
@@ -229,6 +229,20 @@
}
+#ifdef SHARK
+// ------------------------------------------------------------------
+// ciMethod::itable_index
+//
+// Get the position of this method's entry in the itable, if any.
+int ciMethod::itable_index() {
+ check_is_loaded();
+ assert(holder()->is_linked(), "must be linked");
+ VM_ENTRY_MARK;
+ return klassItable::compute_itable_index(get_methodOop());
+}
+#endif // SHARK
+
+
// ------------------------------------------------------------------
// ciMethod::native_entry
//
@@ -294,34 +308,34 @@
// ------------------------------------------------------------------
// ciMethod::get_flow_analysis
ciTypeFlow* ciMethod::get_flow_analysis() {
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(SHARK)
if (_flow == NULL) {
ciEnv* env = CURRENT_ENV;
_flow = new (env->arena()) ciTypeFlow(env, this);
_flow->do_flow();
}
return _flow;
-#else // COMPILER2
+#else // COMPILER2 || SHARK
ShouldNotReachHere();
return NULL;
-#endif // COMPILER2
+#endif // COMPILER2 || SHARK
}
// ------------------------------------------------------------------
// ciMethod::get_osr_flow_analysis
ciTypeFlow* ciMethod::get_osr_flow_analysis(int osr_bci) {
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(SHARK)
// OSR entry points are always placed after a call bytecode of some sort
assert(osr_bci >= 0, "must supply valid OSR entry point");
ciEnv* env = CURRENT_ENV;
ciTypeFlow* flow = new (env->arena()) ciTypeFlow(env, this, osr_bci);
flow->do_flow();
return flow;
-#else // COMPILER2
+#else // COMPILER2 || SHARK
ShouldNotReachHere();
return NULL;
-#endif // COMPILER2
+#endif // COMPILER2 || SHARK
}
// ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciMethod.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -70,7 +70,7 @@
// Optional liveness analyzer.
MethodLiveness* _liveness;
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(SHARK)
ciTypeFlow* _flow;
BCEscapeAnalyzer* _bcea;
#endif
@@ -141,6 +141,9 @@
// Runtime information.
int vtable_index();
+#ifdef SHARK
+ int itable_index();
+#endif // SHARK
address native_entry();
address interpreter_entry();
--- a/hotspot/src/share/vm/ci/ciMethodBlocks.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciMethodBlocks.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -252,7 +252,7 @@
_arena(arena), _num_blocks(0), _code_size(meth->code_size()) {
int block_estimate = _code_size / 8;
- _blocks = new(_arena) GrowableArray<ciBlock *>(block_estimate);
+ _blocks = new(_arena) GrowableArray<ciBlock *>(_arena, block_estimate, 0, NULL);
int b2bsize = _code_size * sizeof(ciBlock **);
_bci_to_block = (ciBlock **) arena->Amalloc(b2bsize);
Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord));
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -2591,7 +2591,7 @@
StateVector* temp_vector,
JsrSet* temp_set) {
int dft_len = 100;
- GrowableArray<Block*> stk(arena(), dft_len, 0, NULL);
+ GrowableArray<Block*> stk(dft_len);
ciBlock* dummy = _methodBlocks->make_dummy_block();
JsrSet* root_set = new JsrSet(NULL, 0);
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -62,6 +62,7 @@
ClassFileStream cfs1 = *cfs0;
ClassFileStream* cfs = &cfs1;
#ifdef ASSERT
+ assert(cfs->allocated_on_stack(),"should be local");
u1* old_current = cfs0->current();
#endif
--- a/hotspot/src/share/vm/classfile/verificationType.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verificationType.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -70,7 +70,9 @@
} else if (is_array() && from.is_array()) {
VerificationType comp_this = get_component(CHECK_false);
VerificationType comp_from = from.get_component(CHECK_false);
- return comp_this.is_assignable_from(comp_from, context, CHECK_false);
+ if (!comp_this.is_bogus() && !comp_from.is_bogus()) {
+ return comp_this.is_assignable_from(comp_from, context, CHECK_false);
+ }
}
return false;
}
@@ -98,7 +100,7 @@
CHECK_(VerificationType::bogus_type()));
return VerificationType::reference_type(component);
default:
- ShouldNotReachHere();
+ // Met an invalid type signature, e.g. [X
return VerificationType::bogus_type();
}
}
--- a/hotspot/src/share/vm/classfile/verifier.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1847,12 +1847,8 @@
if (type == VerificationType::uninitialized_this_type()) {
// The method must be an <init> method of either this class, or one of its
// superclasses
- klassOop oop = current_class()();
- Klass* klass = oop->klass_part();
- while (klass != NULL && ref_class_type.name() != klass->name()) {
- klass = klass->super()->klass_part();
- }
- if (klass == NULL) {
+ if (ref_class_type.name() != current_class()->name() &&
+ !name_in_supers(ref_class_type.name(), current_class())) {
verify_error(bci, "Bad <init> method call");
return;
}
--- a/hotspot/src/share/vm/code/codeBlob.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -564,72 +564,53 @@
ShouldNotReachHere();
}
-#ifndef PRODUCT
-
-void CodeBlob::print() const {
- tty->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this);
- tty->print_cr("Framesize: %d", _frame_size);
+void CodeBlob::print_on(outputStream* st) const {
+ st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this);
+ st->print_cr("Framesize: %d", _frame_size);
}
-
void CodeBlob::print_value_on(outputStream* st) const {
st->print_cr("[CodeBlob]");
}
-#endif
-
void BufferBlob::verify() {
// unimplemented
}
-#ifndef PRODUCT
-
-void BufferBlob::print() const {
- CodeBlob::print();
- print_value_on(tty);
+void BufferBlob::print_on(outputStream* st) const {
+ CodeBlob::print_on(st);
+ print_value_on(st);
}
-
void BufferBlob::print_value_on(outputStream* st) const {
st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", this, name());
}
-
-#endif
-
void RuntimeStub::verify() {
// unimplemented
}
-#ifndef PRODUCT
-
-void RuntimeStub::print() const {
- CodeBlob::print();
- tty->print("Runtime Stub (" INTPTR_FORMAT "): ", this);
- tty->print_cr(name());
- Disassembler::decode((CodeBlob*)this);
+void RuntimeStub::print_on(outputStream* st) const {
+ CodeBlob::print_on(st);
+ st->print("Runtime Stub (" INTPTR_FORMAT "): ", this);
+ st->print_cr(name());
+ Disassembler::decode((CodeBlob*)this, st);
}
-
void RuntimeStub::print_value_on(outputStream* st) const {
st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print(name());
}
-#endif
-
void SingletonBlob::verify() {
// unimplemented
}
-#ifndef PRODUCT
-
-void SingletonBlob::print() const {
- CodeBlob::print();
- tty->print_cr(name());
- Disassembler::decode((CodeBlob*)this);
+void SingletonBlob::print_on(outputStream* st) const {
+ CodeBlob::print_on(st);
+ st->print_cr(name());
+ Disassembler::decode((CodeBlob*)this, st);
}
-
void SingletonBlob::print_value_on(outputStream* st) const {
st->print_cr(name());
}
@@ -637,5 +618,3 @@
void DeoptimizationBlob::print_value_on(outputStream* st) const {
st->print_cr("Deoptimization (frame not available)");
}
-
-#endif // PRODUCT
--- a/hotspot/src/share/vm/code/codeBlob.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -163,8 +163,9 @@
// Debugging
virtual void verify();
- virtual void print() const PRODUCT_RETURN;
- virtual void print_value_on(outputStream* st) const PRODUCT_RETURN;
+ void print() const { print_on(tty); }
+ virtual void print_on(outputStream* st) const;
+ virtual void print_value_on(outputStream* st) const;
// Print the comment associated with offset on stream, if there is one
virtual void print_block_comment(outputStream* stream, address block_begin) {
@@ -209,8 +210,8 @@
bool is_alive() const { return true; }
void verify();
- void print() const PRODUCT_RETURN;
- void print_value_on(outputStream* st) const PRODUCT_RETURN;
+ void print_on(outputStream* st) const;
+ void print_value_on(outputStream* st) const;
};
@@ -292,8 +293,8 @@
bool is_alive() const { return true; }
void verify();
- void print() const PRODUCT_RETURN;
- void print_value_on(outputStream* st) const PRODUCT_RETURN;
+ void print_on(outputStream* st) const;
+ void print_value_on(outputStream* st) const;
};
@@ -317,8 +318,8 @@
bool is_alive() const { return true; }
void verify(); // does nothing
- void print() const PRODUCT_RETURN;
- void print_value_on(outputStream* st) const PRODUCT_RETURN;
+ void print_on(outputStream* st) const;
+ void print_value_on(outputStream* st) const;
};
@@ -373,7 +374,7 @@
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
// Printing
- void print_value_on(outputStream* st) const PRODUCT_RETURN;
+ void print_value_on(outputStream* st) const;
address unpack() const { return instructions_begin() + _unpack_offset; }
address unpack_with_exception() const { return instructions_begin() + _unpack_with_exception; }
--- a/hotspot/src/share/vm/code/nmethod.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -65,6 +65,11 @@
if (is_native_method()) return false;
return compiler()->is_c2();
}
+bool nmethod::is_compiled_by_shark() const {
+ if (is_native_method()) return false;
+ assert(compiler() != NULL, "must be");
+ return compiler()->is_shark();
+}
@@ -1353,6 +1358,10 @@
CodeCache::remove_saved_code(this);
}
+#ifdef SHARK
+ ((SharkCompiler *) compiler())->free_compiled_method(instructions_begin());
+#endif // SHARK
+
((CodeBlob*)(this))->flush();
CodeCache::free(this);
@@ -1769,6 +1778,7 @@
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
+#ifndef SHARK
if (!method()->is_native()) {
SimpleScopeDesc ssd(this, fr.pc());
Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
@@ -1776,6 +1786,7 @@
symbolOop signature = call->signature();
fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
}
+#endif // !SHARK
}
@@ -2279,6 +2290,8 @@
tty->print("(c1) ");
} else if (is_compiled_by_c2()) {
tty->print("(c2) ");
+ } else if (is_compiled_by_shark()) {
+ tty->print("(shark) ");
} else {
tty->print("(nm) ");
}
@@ -2472,8 +2485,12 @@
if (block_begin == exception_begin()) stream->print_cr("[Exception Handler]");
if (block_begin == stub_begin()) stream->print_cr("[Stub Code]");
if (block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]");
- if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]");
+
+ if (has_method_handle_invokes())
+ if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]");
+
if (block_begin == consts_begin()) stream->print_cr("[Constants]");
+
if (block_begin == entry_point()) {
methodHandle m = method();
if (m.not_null()) {
--- a/hotspot/src/share/vm/code/nmethod.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -329,6 +329,7 @@
bool is_compiled_by_c1() const;
bool is_compiled_by_c2() const;
+ bool is_compiled_by_shark() const;
// boundaries for different parts
address code_begin () const { return _entry_point; }
@@ -606,6 +607,8 @@
void print_nul_chk_table() PRODUCT_RETURN;
void print_nmethod(bool print_code);
+ // need to re-define this from CodeBlob else the overload hides it
+ virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
void print_on(outputStream* st, const char* title) const;
// Logging
--- a/hotspot/src/share/vm/code/vtableStubs.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -67,8 +67,8 @@
}
-void VtableStub::print() {
- tty->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
+void VtableStub::print_on(outputStream* st) const {
+ st->print("vtable stub (index = %d, receiver_location = %d, code = [" INTPTR_FORMAT ", " INTPTR_FORMAT "[)",
index(), receiver_location(), code_begin(), code_end());
}
--- a/hotspot/src/share/vm/code/vtableStubs.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/code/vtableStubs.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -86,7 +86,9 @@
bool is_abstract_method_error(address epc) { return epc == code_begin()+_ame_offset; }
bool is_null_pointer_exception(address epc) { return epc == code_begin()+_npe_offset; }
- void print();
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
+
};
--- a/hotspot/src/share/vm/compiler/abstractCompiler.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/compiler/abstractCompiler.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,18 +45,26 @@
// Missing feature tests
virtual bool supports_native() { return true; }
virtual bool supports_osr () { return true; }
-#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2))
+#if defined(TIERED) || ( !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK))
virtual bool is_c1 () { return false; }
virtual bool is_c2 () { return false; }
+ virtual bool is_shark() { return false; }
#else
#ifdef COMPILER1
bool is_c1 () { return true; }
bool is_c2 () { return false; }
+ bool is_shark() { return false; }
#endif // COMPILER1
#ifdef COMPILER2
bool is_c1 () { return false; }
bool is_c2 () { return true; }
+ bool is_shark() { return false; }
#endif // COMPILER2
+#ifdef SHARK
+ bool is_c1 () { return false; }
+ bool is_c2 () { return false; }
+ bool is_shark() { return true; }
+#endif // SHARK
#endif // TIERED
// Customization
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -568,6 +568,14 @@
#endif
#endif // COMPILER2
+#ifdef SHARK
+#if defined(COMPILER1) || defined(COMPILER2)
+#error "Can't use COMPILER1 or COMPILER2 with shark"
+#endif
+ _compilers[0] = new SharkCompiler();
+ _compilers[1] = _compilers[0];
+#endif
+
// Initialize the CompileTask free list
_task_free_list = NULL;
--- a/hotspot/src/share/vm/compiler/disassembler.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -371,7 +371,7 @@
address decode_env::decode_instructions(address start, address end) {
_start = start; _end = end;
- assert((((intptr_t)start | (intptr_t)end) % Disassembler::pd_instruction_alignment() == 0), "misaligned insn addr");
+ assert(((((intptr_t)start | (intptr_t)end) % Disassembler::pd_instruction_alignment()) == 0), "misaligned insn addr");
const int show_bytes = false; // for disassembler debugging
@@ -423,8 +423,14 @@
env.output()->print_cr("Decoding compiled method " INTPTR_FORMAT ":", nm);
env.output()->print_cr("Code:");
+#ifdef SHARK
+ SharkEntry* entry = (SharkEntry *) nm->instructions_begin();
+ unsigned char* p = entry->code_start();
+ unsigned char* end = entry->code_limit();
+#else
unsigned char* p = nm->instructions_begin();
unsigned char* end = nm->instructions_end();
+#endif // SHARK
// If there has been profiling, print the buckets.
if (FlatProfiler::bucket_start_for(p) != NULL) {
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -158,13 +158,18 @@
// The line below is the worst bit of C++ hackery I've ever written
// (Detlefs, 11/23). You should think of it as equivalent to
// "_regions(100, true)": initialize the growable array and inform it
- // that it should allocate its elem array(s) on the C heap. The first
- // argument, however, is actually a comma expression (new-expr, 100).
- // The purpose of the new_expr is to inform the growable array that it
- // is *already* allocated on the C heap: it uses the placement syntax to
- // keep it from actually doing any allocation.
- _markedRegions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
- (void*)&_markedRegions,
+ // that it should allocate its elem array(s) on the C heap.
+ //
+ // The first argument, however, is actually a comma expression
+ // (set_allocation_type(this, C_HEAP), 100). The purpose of the
+ // set_allocation_type() call is to replace the default allocation
+ // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. This lets
+ // the assert in GenericGrowableArray() pass, which checks that a
+ // growable array object must be on the C heap if its elements are.
+ //
+ // Note: containing object is allocated on C heap since it is CHeapObj.
+ //
+ _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
ResourceObj::C_HEAP),
100),
true),
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -42,14 +42,19 @@
// The line below is the worst bit of C++ hackery I've ever written
// (Detlefs, 11/23). You should think of it as equivalent to
// "_regions(100, true)": initialize the growable array and inform it
- // that it should allocate its elem array(s) on the C heap. The first
- // argument, however, is actually a comma expression (new-expr, 100).
- // The purpose of the new_expr is to inform the growable array that it
- // is *already* allocated on the C heap: it uses the placement syntax to
- // keep it from actually doing any allocation.
- _regions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
- (void*)&_regions,
- ResourceObj::C_HEAP),
+ // that it should allocate its elem array(s) on the C heap.
+ //
+ // The first argument, however, is actually a comma expression
+ // (set_allocation_type(this, C_HEAP), 100). The purpose of the
+ // set_allocation_type() call is to replace the default allocation
+ // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. This lets
+ // the assert in GenericGrowableArray() pass, which checks that a
+ // growable array object must be on the C heap if its elements are.
+ //
+ // Note: containing object is allocated on C heap since it is CHeapObj.
+ //
+ _regions((ResourceObj::set_allocation_type((address)&_regions,
+ ResourceObj::C_HEAP),
(int)max_size),
true),
_next_rr_candidate(0),
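
Both GrowableArray constructor hunks above use the same trick: a comma expression in the member initializer runs ResourceObj::set_allocation_type() purely for its side effect and then yields the real constructor argument (100, or max_size). A minimal illustration of that pattern with made-up names, not the HotSpot classes:

#include <cassert>
#include <cstddef>

struct Elem {
  int tag;
  explicit Elem(int n) : tag(n) {}
};

static void* g_last_marked = NULL;
static void mark(void* where) { g_last_marked = where; }   // side effect only

struct Owner {
  Elem _elem;
  // (mark(&_elem), 100): mark() runs first for its side effect, then the
  // comma expression evaluates to 100, which is what Elem's constructor
  // actually receives, mirroring the (set_allocation_type(...), 100) shape.
  Owner() : _elem((mark(&_elem), 100)) {}
};

int main() {
  Owner o;
  assert(o._elem.tag == 100);
  assert(g_last_marked == &o._elem);
  return 0;
}
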
--- a/hotspot/src/share/vm/includeDB_compiler1 Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_compiler1 Fri Aug 13 10:55:42 2010 -0700
@@ -252,6 +252,7 @@
c1_LIRGenerator.cpp ciInstance.hpp
c1_LIRGenerator.cpp heapRegion.hpp
c1_LIRGenerator.cpp sharedRuntime.hpp
+c1_LIRGenerator.cpp stubRoutines.hpp
c1_LIRGenerator.hpp c1_Instruction.hpp
c1_LIRGenerator.hpp c1_LIR.hpp
@@ -270,6 +271,8 @@
c1_LIRGenerator_<arch>.cpp ciTypeArrayKlass.hpp
c1_LIRGenerator_<arch>.cpp sharedRuntime.hpp
c1_LIRGenerator_<arch>.cpp vmreg_<arch>.inline.hpp
+c1_LIRGenerator_<arch>.cpp stubRoutines.hpp
+
c1_LinearScan.cpp bitMap.inline.hpp
c1_LinearScan.cpp c1_CFGPrinter.hpp
@@ -413,6 +416,7 @@
compileBroker.cpp c1_Compiler.hpp
frame_<arch>.cpp c1_Runtime1.hpp
+frame_<arch>.cpp vframeArray.hpp
globals.cpp c1_globals.hpp
--- a/hotspot/src/share/vm/includeDB_compiler2 Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_compiler2 Fri Aug 13 10:55:42 2010 -0700
@@ -911,6 +911,7 @@
reg_split.cpp addnode.hpp
reg_split.cpp allocation.inline.hpp
reg_split.cpp callnode.hpp
+reg_split.cpp c2compiler.hpp
reg_split.cpp cfgnode.hpp
reg_split.cpp chaitin.hpp
reg_split.cpp loopnode.hpp
--- a/hotspot/src/share/vm/includeDB_core Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_core Fri Aug 13 10:55:42 2010 -0700
@@ -284,6 +284,7 @@
atomic_<os_arch>.inline.hpp atomic.hpp
atomic_<os_arch>.inline.hpp os.hpp
atomic_<os_arch>.inline.hpp vm_version_<arch>.hpp
+atomic_<os_arch>.inline.hpp orderAccess_<os_arch>.inline.hpp
// attachListener is jck optional, put cpp deps in includeDB_features
@@ -1734,6 +1735,7 @@
genCollectedHeap.cpp space.hpp
genCollectedHeap.cpp symbolTable.hpp
genCollectedHeap.cpp systemDictionary.hpp
+genCollectedHeap.cpp vmError.hpp
genCollectedHeap.cpp vmGCOperations.hpp
genCollectedHeap.cpp vmSymbols.hpp
genCollectedHeap.cpp vmThread.hpp
@@ -3230,6 +3232,7 @@
os.cpp events.hpp
os.cpp frame.inline.hpp
os.cpp hpi.hpp
+os.cpp icBuffer.hpp
os.cpp interfaceSupport.hpp
os.cpp interpreter.hpp
os.cpp java.hpp
@@ -3241,6 +3244,7 @@
os.cpp oop.inline.hpp
os.cpp os.hpp
os.cpp os_<os_family>.inline.hpp
+os.cpp privilegedStack.hpp
os.cpp stubRoutines.hpp
os.cpp systemDictionary.hpp
os.cpp threadService.hpp
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/includeDB_shark Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,371 @@
+//
+// Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+// Copyright 2008, 2009, 2010 Red Hat, Inc.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+// NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
+
+ciMethod.cpp ciTypeFlow.hpp
+ciMethod.cpp methodOop.hpp
+
+ciTypeFlow.cpp allocation.inline.hpp
+ciTypeFlow.cpp bytecode.hpp
+ciTypeFlow.cpp bytecodes.hpp
+ciTypeFlow.cpp ciConstant.hpp
+ciTypeFlow.cpp ciField.hpp
+ciTypeFlow.cpp ciMethod.hpp
+ciTypeFlow.cpp ciMethodData.hpp
+ciTypeFlow.cpp ciObjArrayKlass.hpp
+ciTypeFlow.cpp ciStreams.hpp
+ciTypeFlow.cpp ciTypeArrayKlass.hpp
+ciTypeFlow.cpp ciTypeFlow.hpp
+ciTypeFlow.cpp compileLog.hpp
+ciTypeFlow.cpp deoptimization.hpp
+ciTypeFlow.cpp growableArray.hpp
+ciTypeFlow.cpp shark_globals.hpp
+
+ciTypeFlow.hpp ciEnv.hpp
+ciTypeFlow.hpp ciKlass.hpp
+ciTypeFlow.hpp ciMethodBlocks.hpp
+
+cppInterpreter_<arch>.cpp shark_globals.hpp
+
+compileBroker.cpp sharkCompiler.hpp
+
+disassembler.cpp sharkEntry.hpp
+
+globals.hpp shark_globals_<arch>.hpp
+
+globals.cpp shark_globals.hpp
+
+llvmValue.hpp llvmHeaders.hpp
+llvmValue.hpp sharkContext.hpp
+llvmValue.hpp sharkType.hpp
+
+nmethod.cpp sharkCompiler.hpp
+
+sharedRuntime_<arch>.cpp compileBroker.hpp
+sharedRuntime_<arch>.cpp sharkCompiler.hpp
+
+shark_globals.cpp shark_globals.hpp
+
+shark_globals.hpp shark_globals_<arch>.hpp
+shark_globals.hpp globals.hpp
+
+sharkBlock.cpp debug.hpp
+sharkBlock.cpp bytecodes.hpp
+sharkBlock.cpp llvmHeaders.hpp
+sharkBlock.cpp llvmValue.hpp
+sharkBlock.cpp shark_globals.hpp
+sharkBlock.cpp sharkBlock.hpp
+sharkBlock.cpp sharkBuilder.hpp
+sharkBlock.cpp sharkConstant.hpp
+sharkBlock.cpp sharkState.hpp
+sharkBlock.cpp sharkValue.hpp
+
+sharkBlock.hpp allocation.hpp
+sharkBlock.hpp ciMethod.hpp
+sharkBlock.hpp ciStreams.hpp
+sharkBlock.hpp debug.hpp
+sharkBlock.hpp llvmHeaders.hpp
+sharkBlock.hpp sharkBuilder.hpp
+sharkBlock.hpp sharkConstant.hpp
+sharkBlock.hpp sharkInvariants.hpp
+sharkBlock.hpp sharkState.hpp
+sharkBlock.hpp sharkValue.hpp
+
+sharkBuilder.cpp ciMethod.hpp
+sharkBuilder.cpp debug.hpp
+sharkBuilder.cpp llvmHeaders.hpp
+sharkBuilder.cpp llvmValue.hpp
+sharkBuilder.cpp methodOop.hpp
+sharkBuilder.cpp os.hpp
+sharkBuilder.cpp resourceArea.hpp
+sharkBuilder.cpp llvmHeaders.hpp
+sharkBuilder.cpp sharkBuilder.hpp
+sharkBuilder.cpp sharkContext.hpp
+sharkBuilder.cpp sharkRuntime.hpp
+sharkBuilder.cpp synchronizer.hpp
+sharkBuilder.cpp thread.hpp
+
+sharkBuilder.hpp barrierSet.hpp
+sharkBuilder.hpp cardTableModRefBS.hpp
+sharkBuilder.hpp ciType.hpp
+sharkBuilder.hpp debug.hpp
+sharkBuilder.hpp llvmHeaders.hpp
+sharkBuilder.hpp llvmValue.hpp
+sharkBuilder.hpp sizes.hpp
+sharkBuilder.hpp sharkCodeBuffer.hpp
+sharkBuilder.hpp sharkType.hpp
+sharkBuilder.hpp sharkValue.hpp
+sharkBuilder.hpp sharkEntry.hpp
+
+sharkCacheDecache.cpp ciMethod.hpp
+sharkCacheDecache.cpp debugInfoRec.hpp
+sharkCacheDecache.cpp llvmValue.hpp
+sharkCacheDecache.cpp sharkBuilder.hpp
+sharkCacheDecache.cpp sharkCacheDecache.hpp
+sharkCacheDecache.cpp sharkFunction.hpp
+sharkCacheDecache.cpp sharkState.hpp
+
+sharkCacheDecache.hpp ciMethod.hpp
+sharkCacheDecache.hpp debugInfoRec.hpp
+sharkCacheDecache.hpp sharkBuilder.hpp
+sharkCacheDecache.hpp sharkFunction.hpp
+sharkCacheDecache.hpp sharkStateScanner.hpp
+
+sharkCodeBuffer.hpp allocation.hpp
+sharkCodeBuffer.hpp codeBuffer.hpp
+sharkCodeBuffer.hpp llvmHeaders.hpp
+
+sharkCompiler.cpp abstractCompiler.hpp
+sharkCompiler.cpp ciEnv.hpp
+sharkCompiler.cpp ciMethod.hpp
+sharkCompiler.cpp debug.hpp
+sharkCompiler.cpp debugInfoRec.hpp
+sharkCompiler.cpp dependencies.hpp
+sharkCompiler.cpp exceptionHandlerTable.hpp
+sharkCompiler.cpp llvmHeaders.hpp
+sharkCompiler.cpp oopMap.hpp
+sharkCompiler.cpp oopRecorder.hpp
+sharkCompiler.cpp shark_globals.hpp
+sharkCompiler.cpp sharkBuilder.hpp
+sharkCompiler.cpp sharkCodeBuffer.hpp
+sharkCompiler.cpp sharkCompiler.hpp
+sharkCompiler.cpp sharkContext.hpp
+sharkCompiler.cpp sharkEntry.hpp
+sharkCompiler.cpp sharkFunction.hpp
+sharkCompiler.cpp sharkMemoryManager.hpp
+sharkCompiler.cpp sharkNativeWrapper.hpp
+
+sharkCompiler.hpp abstractCompiler.hpp
+sharkCompiler.hpp ciEnv.hpp
+sharkCompiler.hpp ciMethod.hpp
+sharkCompiler.hpp compileBroker.hpp
+sharkCompiler.hpp llvmHeaders.hpp
+sharkCompiler.hpp sharkMemoryManager.hpp
+
+sharkContext.cpp arrayOop.hpp
+sharkContext.cpp globalDefinitions.hpp
+sharkContext.cpp llvmHeaders.hpp
+sharkContext.cpp oop.hpp
+sharkContext.cpp sharkContext.hpp
+
+sharkContext.hpp llvmHeaders.hpp
+sharkContext.hpp sharkCompiler.hpp
+
+sharkConstant.cpp ciInstance.hpp
+sharkConstant.cpp ciStreams.hpp
+sharkConstant.cpp sharkBuilder.hpp
+sharkConstant.cpp sharkConstant.hpp
+sharkConstant.cpp sharkValue.hpp
+
+sharkConstant.hpp allocation.hpp
+sharkConstant.hpp ciStreams.hpp
+sharkConstant.hpp sharkBuilder.hpp
+sharkConstant.hpp sharkValue.hpp
+
+sharkEntry.hpp llvmHeaders.hpp
+
+sharkFunction.cpp allocation.hpp
+sharkFunction.cpp ciTypeFlow.hpp
+sharkFunction.cpp debug.hpp
+sharkFunction.cpp llvmHeaders.hpp
+sharkFunction.cpp llvmValue.hpp
+sharkFunction.cpp shark_globals.hpp
+sharkFunction.cpp sharkBuilder.hpp
+sharkFunction.cpp sharkEntry.hpp
+sharkFunction.cpp sharkFunction.hpp
+sharkFunction.cpp sharkState.hpp
+sharkFunction.cpp sharkTopLevelBlock.hpp
+
+sharkFunction.hpp allocation.hpp
+sharkFunction.hpp ciEnv.hpp
+sharkFunction.hpp ciStreams.hpp
+sharkFunction.hpp ciTypeFlow.hpp
+sharkFunction.hpp llvmHeaders.hpp
+sharkFunction.hpp llvmValue.hpp
+sharkFunction.hpp sharkBuilder.hpp
+sharkFunction.hpp sharkContext.hpp
+sharkFunction.hpp sharkInvariants.hpp
+sharkFunction.hpp sharkStack.hpp
+
+sharkInliner.cpp allocation.hpp
+sharkInliner.cpp bytecodes.hpp
+sharkInliner.cpp ciField.hpp
+sharkInliner.cpp ciMethod.hpp
+sharkInliner.cpp ciStreams.hpp
+sharkInliner.cpp shark_globals.hpp
+sharkInliner.cpp sharkBlock.hpp
+sharkInliner.cpp sharkConstant.hpp
+sharkInliner.cpp sharkInliner.hpp
+sharkInliner.cpp sharkIntrinsics.hpp
+sharkInliner.cpp sharkState.hpp
+sharkInliner.cpp sharkValue.hpp
+
+sharkInliner.hpp allocation.hpp
+sharkInliner.hpp ciMethod.hpp
+sharkInliner.hpp llvmHeaders.hpp
+sharkInliner.hpp sharkState.hpp
+
+sharkIntrinsics.cpp ciMethod.hpp
+sharkIntrinsics.cpp llvmHeaders.hpp
+sharkIntrinsics.cpp shark_globals.hpp
+sharkIntrinsics.cpp sharkIntrinsics.hpp
+sharkIntrinsics.cpp sharkState.hpp
+sharkIntrinsics.cpp sharkValue.hpp
+
+sharkIntrinsics.hpp allocation.hpp
+sharkIntrinsics.hpp ciMethod.hpp
+sharkIntrinsics.hpp llvmHeaders.hpp
+sharkIntrinsics.hpp sharkState.hpp
+
+sharkInvariants.cpp sharkInvariants.hpp
+
+sharkInvariants.hpp allocation.hpp
+sharkInvariants.hpp ciEnv.hpp
+sharkInvariants.hpp ciMethod.hpp
+sharkInvariants.hpp ciInstanceKlass.hpp
+sharkInvariants.hpp ciTypeFlow.hpp
+sharkInvariants.hpp debugInfoRec.hpp
+sharkInvariants.hpp dependencies.hpp
+sharkInvariants.hpp llvmHeaders.hpp
+sharkInvariants.hpp sharkBuilder.hpp
+
+sharkMemoryManager.hpp llvmHeaders.hpp
+sharkMemoryManager.hpp sharkEntry.hpp
+
+sharkMemoryManager.cpp llvmHeaders.hpp
+sharkMemoryManager.cpp sharkEntry.hpp
+sharkMemoryManager.cpp sharkMemoryManager.hpp
+
+sharkNativeWrapper.cpp llvmHeaders.hpp
+sharkNativeWrapper.cpp sharkNativeWrapper.hpp
+sharkNativeWrapper.cpp sharkType.hpp
+
+sharkNativeWrapper.hpp handles.hpp
+sharkNativeWrapper.hpp llvmHeaders.hpp
+sharkNativeWrapper.hpp sharkBuilder.hpp
+sharkNativeWrapper.hpp sharkContext.hpp
+sharkNativeWrapper.hpp sharkInvariants.hpp
+sharkNativeWrapper.hpp sharkStack.hpp
+
+sharkRuntime.cpp biasedLocking.hpp
+sharkRuntime.cpp deoptimization.hpp
+sharkRuntime.cpp llvmHeaders.hpp
+sharkRuntime.cpp klassOop.hpp
+sharkRuntime.cpp sharkRuntime.hpp
+sharkRuntime.cpp stack_<arch>.inline.hpp
+sharkRuntime.cpp thread.hpp
+
+sharkRuntime.hpp allocation.hpp
+sharkRuntime.hpp llvmHeaders.hpp
+sharkRuntime.hpp llvmValue.hpp
+sharkRuntime.hpp klassOop.hpp
+sharkRuntime.hpp thread.hpp
+
+sharkStack.cpp llvmHeaders.hpp
+sharkStack.cpp sharkFunction.hpp
+sharkStack.cpp sharkNativeWrapper.hpp
+sharkStack.cpp sharkStack.hpp
+sharkStack.cpp sharkType.hpp
+
+sharkStack.hpp llvmHeaders.hpp
+sharkStack.hpp sharkInvariants.hpp
+sharkStack.hpp sharkType.hpp
+
+sharkState.cpp allocation.hpp
+sharkState.cpp ciType.hpp
+sharkState.cpp ciTypeFlow.hpp
+sharkState.cpp sharkBuilder.hpp
+sharkState.cpp sharkCacheDecache.hpp
+sharkState.cpp sharkState.hpp
+sharkState.cpp sharkTopLevelBlock.hpp
+sharkState.cpp sharkType.hpp
+sharkState.cpp sharkValue.hpp
+
+sharkState.hpp allocation.hpp
+sharkState.hpp ciMethod.hpp
+sharkState.hpp llvmHeaders.hpp
+sharkState.hpp sharkBuilder.hpp
+sharkState.hpp sharkInvariants.hpp
+sharkState.hpp sharkValue.hpp
+
+sharkStateScanner.cpp sharkState.hpp
+sharkStateScanner.cpp sharkStateScanner.hpp
+
+sharkStateScanner.hpp allocation.hpp
+sharkStateScanner.hpp llvmHeaders.hpp
+sharkStateScanner.hpp sharkFunction.hpp
+sharkStateScanner.hpp sharkInvariants.hpp
+
+sharkTopLevelBlock.cpp allocation.hpp
+sharkTopLevelBlock.cpp bytecodes.hpp
+sharkTopLevelBlock.cpp ciField.hpp
+sharkTopLevelBlock.cpp ciInstance.hpp
+sharkTopLevelBlock.cpp ciObjArrayKlass.hpp
+sharkTopLevelBlock.cpp ciStreams.hpp
+sharkTopLevelBlock.cpp ciType.hpp
+sharkTopLevelBlock.cpp ciTypeFlow.hpp
+sharkTopLevelBlock.cpp debug.hpp
+sharkTopLevelBlock.cpp deoptimization.hpp
+sharkTopLevelBlock.cpp llvmHeaders.hpp
+sharkTopLevelBlock.cpp llvmValue.hpp
+sharkTopLevelBlock.cpp shark_globals.hpp
+sharkTopLevelBlock.cpp sharkCacheDecache.hpp
+sharkTopLevelBlock.cpp sharkTopLevelBlock.hpp
+sharkTopLevelBlock.cpp sharkBuilder.hpp
+sharkTopLevelBlock.cpp sharkConstant.hpp
+sharkTopLevelBlock.cpp sharkInliner.hpp
+sharkTopLevelBlock.cpp sharkState.hpp
+sharkTopLevelBlock.cpp sharkValue.hpp
+
+sharkTopLevelBlock.hpp allocation.hpp
+sharkTopLevelBlock.hpp bytecodes.hpp
+sharkTopLevelBlock.hpp ciStreams.hpp
+sharkTopLevelBlock.hpp ciType.hpp
+sharkTopLevelBlock.hpp ciTypeFlow.hpp
+sharkTopLevelBlock.hpp llvmHeaders.hpp
+sharkTopLevelBlock.hpp sharkBlock.hpp
+sharkTopLevelBlock.hpp sharkBuilder.hpp
+sharkTopLevelBlock.hpp sharkFunction.hpp
+sharkTopLevelBlock.hpp sharkState.hpp
+sharkTopLevelBlock.hpp sharkValue.hpp
+
+sharkType.hpp allocation.hpp
+sharkType.hpp ciType.hpp
+sharkType.hpp globalDefinitions.hpp
+sharkType.hpp llvmHeaders.hpp
+sharkType.hpp sharkContext.hpp
+
+sharkValue.cpp ciType.hpp
+sharkValue.cpp llvmHeaders.hpp
+sharkValue.cpp llvmValue.hpp
+sharkValue.cpp sharkBuilder.hpp
+sharkValue.cpp sharkValue.hpp
+
+sharkValue.hpp allocation.hpp
+sharkValue.hpp ciType.hpp
+sharkValue.hpp llvmHeaders.hpp
+sharkValue.hpp llvmValue.hpp
+sharkValue.hpp sharkType.hpp
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -339,7 +339,8 @@
#define CHECK_NULL(obj_) \
if ((obj_) == NULL) { \
VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \
- }
+ } \
+ VERIFY_OOP(obj_)
#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
@@ -509,7 +510,7 @@
/* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
/* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
-/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,NULL, &&opc_new,
+/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_default, &&opc_new,
/* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
/* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
@@ -539,6 +540,7 @@
// this will trigger a VERIFY_OOP on entry
if (istate->msg() != initialize && ! METHOD->is_static()) {
oop rcvr = LOCALS_OBJECT(0);
+ VERIFY_OOP(rcvr);
}
#endif
// #define HACK
@@ -547,7 +549,7 @@
#endif // HACK
/* QQQ this should be a stack method so we don't know actual direction */
- assert(istate->msg() == initialize ||
+ guarantee(istate->msg() == initialize ||
topOfStack >= istate->stack_limit() &&
topOfStack < istate->stack_base(),
"Stack top out of range");
@@ -613,6 +615,7 @@
rcvr = METHOD->constants()->pool_holder()->klass_part()->java_mirror();
} else {
rcvr = LOCALS_OBJECT(0);
+ VERIFY_OOP(rcvr);
}
// The initial monitor is ours for the taking
BasicObjectLock* mon = &istate->monitor_base()[-1];
@@ -735,6 +738,7 @@
case popping_frame: {
// returned from a java call to pop the frame, restart the call
// clear the message so we don't confuse ourselves later
+ ShouldNotReachHere(); // we don't return this.
assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
istate->set_msg(no_request);
THREAD->clr_pop_frame_in_process();
@@ -801,6 +805,7 @@
// continue locking now that we have a monitor to use
// we expect to find newly allocated monitor at the "top" of the monitor stack.
oop lockee = STACK_OBJECT(-1);
+ VERIFY_OOP(lockee);
// derefing's lockee ought to provoke implicit null check
// find a free monitor
BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
@@ -911,6 +916,7 @@
/* load from local variable */
CASE(_aload):
+ VERIFY_OOP(LOCALS_OBJECT(pc[1]));
SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
@@ -930,6 +936,7 @@
#undef OPC_LOAD_n
#define OPC_LOAD_n(num) \
CASE(_aload_##num): \
+ VERIFY_OOP(LOCALS_OBJECT(num)); \
SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
\
@@ -975,6 +982,7 @@
opcode = pc[1];
switch(opcode) {
case Bytecodes::_aload:
+ VERIFY_OOP(LOCALS_OBJECT(reg));
SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
@@ -1099,7 +1107,7 @@
CASE(_i##opcname): \
if (test && (STACK_INT(-1) == 0)) { \
VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
- "/ by int zero"); \
+ "/ by zero"); \
} \
SET_STACK_INT(VMint##opname(STACK_INT(-2), \
STACK_INT(-1)), \
@@ -1277,7 +1285,12 @@
jfloat f;
jdouble r;
f = STACK_FLOAT(-1);
+#ifdef IA64
+ // IA64 gcc bug
+ r = ( f == 0.0f ) ? (jdouble) f : (jdouble) f + ia64_double_zero;
+#else
r = (jdouble) f;
+#endif
MORE_STACK(-1); // POP
SET_STACK_DOUBLE(r, 1);
UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
@@ -1471,6 +1484,7 @@
CASE(_return_register_finalizer): {
oop rcvr = LOCALS_OBJECT(0);
+ VERIFY_OOP(rcvr);
if (rcvr->klass()->klass_part()->has_finalizer()) {
CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
}
@@ -1561,6 +1575,7 @@
*/
CASE(_aastore): {
oop rhsObject = STACK_OBJECT(-1);
+ VERIFY_OOP(rhsObject);
ARRAY_INTRO( -3);
// arrObj, index are set
if (rhsObject != NULL) {
@@ -1703,6 +1718,7 @@
obj = (oop)NULL;
} else {
obj = (oop) STACK_OBJECT(-1);
+ VERIFY_OOP(obj);
}
CALL_VM(InterpreterRuntime::post_field_access(THREAD,
obj,
@@ -1728,6 +1744,7 @@
int field_offset = cache->f2();
if (cache->is_volatile()) {
if (tos_type == atos) {
+ VERIFY_OOP(obj->obj_field_acquire(field_offset));
SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
} else if (tos_type == itos) {
SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
@@ -1748,6 +1765,7 @@
}
} else {
if (tos_type == atos) {
+ VERIFY_OOP(obj->obj_field(field_offset));
SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
} else if (tos_type == itos) {
SET_STACK_INT(obj->int_field(field_offset), -1);
@@ -1799,6 +1817,7 @@
} else {
obj = (oop) STACK_OBJECT(-2);
}
+ VERIFY_OOP(obj);
}
CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
@@ -1837,6 +1856,7 @@
if (tos_type == itos) {
obj->release_int_field_put(field_offset, STACK_INT(-1));
} else if (tos_type == atos) {
+ VERIFY_OOP(STACK_OBJECT(-1));
obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
} else if (tos_type == btos) {
@@ -1857,6 +1877,7 @@
if (tos_type == itos) {
obj->int_field_put(field_offset, STACK_INT(-1));
} else if (tos_type == atos) {
+ VERIFY_OOP(STACK_OBJECT(-1));
obj->obj_field_put(field_offset, STACK_OBJECT(-1));
OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
} else if (tos_type == btos) {
@@ -1961,6 +1982,7 @@
}
CASE(_checkcast):
if (STACK_OBJECT(-1) != NULL) {
+ VERIFY_OOP(STACK_OBJECT(-1));
u2 index = Bytes::get_Java_u2(pc+1);
if (ProfileInterpreter) {
// needs Profile_checkcast QQQ
@@ -1999,6 +2021,7 @@
if (STACK_OBJECT(-1) == NULL) {
SET_STACK_INT(0, -1);
} else {
+ VERIFY_OOP(STACK_OBJECT(-1));
u2 index = Bytes::get_Java_u2(pc+1);
// Constant pool may have actual klass or unresolved klass. If it is
// unresolved we must resolve it
@@ -2044,10 +2067,12 @@
break;
case JVM_CONSTANT_String:
+ VERIFY_OOP(constants->resolved_string_at(index));
SET_STACK_OBJECT(constants->resolved_string_at(index), 0);
break;
case JVM_CONSTANT_Class:
+ VERIFY_OOP(constants->resolved_klass_at(index)->klass_part()->java_mirror());
SET_STACK_OBJECT(constants->resolved_klass_at(index)->klass_part()->java_mirror(), 0);
break;
@@ -2059,17 +2084,6 @@
THREAD->set_vm_result(NULL);
break;
-#if 0
- CASE(_fast_igetfield):
- CASE(_fastagetfield):
- CASE(_fast_aload_0):
- CASE(_fast_iaccess_0):
- CASE(__fast_aaccess_0):
- CASE(_fast_linearswitch):
- CASE(_fast_binaryswitch):
- fatal("unsupported fast bytecode");
-#endif
-
default: ShouldNotReachHere();
}
UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
@@ -2122,6 +2136,7 @@
// get receiver
int parms = cache->parameter_size();
// Same comments as invokevirtual apply here
+ VERIFY_OOP(STACK_OBJECT(-parms));
instanceKlass* rcvrKlass = (instanceKlass*)
STACK_OBJECT(-parms)->klass()->klass_part();
callee = (methodOop) rcvrKlass->start_of_vtable()[ cache->f2()];
@@ -2205,6 +2220,7 @@
// this fails with an assert
// instanceKlass* rcvrKlass = instanceKlass::cast(STACK_OBJECT(-parms)->klass());
// but this works
+ VERIFY_OOP(STACK_OBJECT(-parms));
instanceKlass* rcvrKlass = (instanceKlass*) STACK_OBJECT(-parms)->klass()->klass_part();
/*
Executing this code in java.lang.String:
@@ -2651,14 +2667,14 @@
LOCALS_SLOT(METHOD->size_of_parameters() - 1));
THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
}
- UPDATE_PC_AND_RETURN(1);
- } else {
- // Normal return
- // Advance the pc and return to frame manager
- istate->set_msg(return_from_method);
- istate->set_return_kind((Bytecodes::Code)opcode);
- UPDATE_PC_AND_RETURN(1);
+ THREAD->clr_pop_frame_in_process();
}
+
+ // Normal return
+ // Advance the pc and return to frame manager
+ istate->set_msg(return_from_method);
+ istate->set_return_kind((Bytecodes::Code)opcode);
+ UPDATE_PC_AND_RETURN(1);
} /* handle_return: */
// This is really a fatal error return
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -440,7 +440,7 @@
* iushr, ishl, and ishr bytecodes, respectively.
*/
-static jint VMintUshr(jint op, jint num);
+static juint VMintUshr(jint op, jint num);
static jint VMintShl (jint op, jint num);
static jint VMintShr (jint op, jint num);
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -27,14 +27,11 @@
#ifdef CC_INTERP
#ifdef ASSERT
-extern "C" { typedef void (*verify_oop_fn_t)(oop, const char *);};
-#define VERIFY_OOP(o) \
- /*{ verify_oop_fn_t verify_oop_entry = \
- *StubRoutines::verify_oop_subroutine_entry_address(); \
- if (verify_oop_entry) { \
- (*verify_oop_entry)((o), "Not an oop!"); \
- } \
- }*/
+#define VERIFY_OOP(o_) \
+ if (VerifyOops) { \
+ assert((oop(o_))->is_oop_or_null(), "Not an oop!"); \
+ StubRoutines::_verify_oop_count++; \
+ }
#else
#define VERIFY_OOP(o)
#endif
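
The reworked VERIFY_OOP above is now a flag-gated assert plus a counter under #ifdef ASSERT, and expands to nothing otherwise. A rough sketch of that macro shape in isolation; the flag, predicate, and counter below are placeholders rather than HotSpot's names:

#include <cassert>
#include <cstddef>
#include <cstdint>

#ifndef NDEBUG
static bool   VerifyValues       = true;   // runtime switch, like -XX:+VerifyOops
static size_t verify_value_count = 0;      // like StubRoutines::_verify_oop_count

static bool looks_valid(const void* p) {
  // Placeholder predicate standing in for is_oop_or_null().
  return p == NULL || (reinterpret_cast<uintptr_t>(p) & 0x3) == 0;
}

#define VERIFY_VALUE(p)                               \
  if (VerifyValues) {                                 \
    assert(looks_valid(p) && "Not a valid value!");   \
    verify_value_count++;                             \
  }
#else
#define VERIFY_VALUE(p)
#endif

int main() {
  int x = 0;
  VERIFY_VALUE(&x);
  VERIFY_VALUE((int*)NULL);
  return 0;
}

As in the original, the macro body is a bare if statement, so it should only be used as a standalone statement, which is how the interpreter hunks above use it.
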
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -41,20 +41,20 @@
}
-void InterpreterCodelet::print() {
+void InterpreterCodelet::print_on(outputStream* st) const {
if (PrintInterpreter) {
- tty->cr();
- tty->print_cr("----------------------------------------------------------------------");
+ st->cr();
+ st->print_cr("----------------------------------------------------------------------");
}
- if (description() != NULL) tty->print("%s ", description());
- if (bytecode() >= 0 ) tty->print("%d %s ", bytecode(), Bytecodes::name(bytecode()));
- tty->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "] %d bytes",
+ if (description() != NULL) st->print("%s ", description());
+ if (bytecode() >= 0 ) st->print("%d %s ", bytecode(), Bytecodes::name(bytecode()));
+ st->print_cr("[" INTPTR_FORMAT ", " INTPTR_FORMAT "] %d bytes",
code_begin(), code_end(), code_size());
if (PrintInterpreter) {
- tty->cr();
- Disassembler::decode(code_begin(), code_end(), tty);
+ st->cr();
+ Disassembler::decode(code_begin(), code_end(), st);
}
}
--- a/hotspot/src/share/vm/interpreter/interpreter.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreter.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -52,7 +52,8 @@
// Debugging
void verify();
- void print();
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
// Interpreter-specific initialization
void initialize(const char* description, Bytecodes::Code bytecode);
--- a/hotspot/src/share/vm/interpreter/oopMapCache.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/oopMapCache.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -281,9 +281,7 @@
public:
void pass_int() { /* ignore */ }
void pass_long() { /* ignore */ }
-#if defined(_LP64) || defined(ZERO)
void pass_float() { /* ignore */ }
-#endif
void pass_double() { /* ignore */ }
void pass_object() { set_one(offset()); }
--- a/hotspot/src/share/vm/memory/allocation.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/memory/allocation.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -43,24 +43,73 @@
switch (type) {
case C_HEAP:
res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
+ DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
break;
case RESOURCE_AREA:
+ // new(size) sets allocation type RESOURCE_AREA.
res = (address)operator new(size);
break;
default:
ShouldNotReachHere();
}
- // Set allocation type in the resource object for assertion checks.
- DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
return res;
}
void ResourceObj::operator delete(void* p) {
assert(((ResourceObj *)p)->allocated_on_C_heap(),
"delete only allowed for C_HEAP objects");
+ DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;)
FreeHeap(p);
}
+#ifdef ASSERT
+void ResourceObj::set_allocation_type(address res, allocation_type type) {
+ // Set allocation type in the resource object
+ uintptr_t allocation = (uintptr_t)res;
+ assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
+ assert(type <= allocation_mask, "incorrect allocation type");
+ ((ResourceObj *)res)->_allocation = ~(allocation + type);
+}
+
+ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
+ assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
+ return (allocation_type)((~_allocation) & allocation_mask);
+}
+
+ResourceObj::ResourceObj() { // default constructor
+ if (~(_allocation | allocation_mask) != (uintptr_t)this) {
+ set_allocation_type((address)this, STACK_OR_EMBEDDED);
+ } else if (allocated_on_stack()) {
+ // For some reason we got a value which looks like an allocation on stack.
+ // Pass if it is really allocated on stack.
+ assert(Thread::current()->on_local_stack((address)this),"should be on stack");
+ } else {
+ assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
+ "allocation_type should be set by operator new()");
+ }
+}
+
+ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
+ // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
+ set_allocation_type((address)this, STACK_OR_EMBEDDED);
+}
+
+ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
+ // Used in InlineTree::ok_to_inline() for WarmCallInfo.
+ assert(allocated_on_stack(), "copy only into local");
+ // Keep current _allocation value;
+ return *this;
+}
+
+ResourceObj::~ResourceObj() {
+ // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
+ if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap.
+ _allocation = badHeapOopVal; // zap type
+ }
+}
+#endif // ASSERT
+
+
void trace_heap_malloc(size_t size, const char* name, void* p) {
// A lock is not needed here - tty uses a lock internally
tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
@@ -166,32 +215,40 @@
_medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
_small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size());
}
+
+ static void clean() {
+ enum { BlocksToKeep = 5 };
+ _small_pool->free_all_but(BlocksToKeep);
+ _medium_pool->free_all_but(BlocksToKeep);
+ _large_pool->free_all_but(BlocksToKeep);
+ }
};
ChunkPool* ChunkPool::_large_pool = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool = NULL;
-
void chunkpool_init() {
ChunkPool::initialize();
}
+void
+Chunk::clean_chunk_pool() {
+ ChunkPool::clean();
+}
+
//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
+//
class ChunkPoolCleaner : public PeriodicTask {
- enum { CleaningInterval = 5000, // cleaning interval in ms
- BlocksToKeep = 5 // # of extra blocks to keep
- };
+ enum { CleaningInterval = 5000 }; // cleaning interval in ms
public:
ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
void task() {
- ChunkPool::small_pool()->free_all_but(BlocksToKeep);
- ChunkPool::medium_pool()->free_all_but(BlocksToKeep);
- ChunkPool::large_pool()->free_all_but(BlocksToKeep);
+ ChunkPool::clean();
}
};
--- a/hotspot/src/share/vm/memory/allocation.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/memory/allocation.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -174,9 +174,10 @@
// Start the chunk_pool cleaner task
static void start_chunk_pool_cleaner_task();
+
+ static void clean_chunk_pool();
};
-
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena: public CHeapObj {
@@ -316,32 +317,36 @@
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
public:
- enum allocation_type { UNKNOWN = 0, C_HEAP, RESOURCE_AREA, ARENA };
+ enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
+ static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
private:
- allocation_type _allocation;
+ // When this object is allocated on the stack the new() operator is not
+ // called, but garbage on the stack may look like a valid allocation_type.
+ // Store the negated 'this' pointer when new() is called to distinguish the cases.
+ uintptr_t _allocation;
public:
- bool allocated_on_C_heap() { return _allocation == C_HEAP; }
+ allocation_type get_allocation_type() const;
+ bool allocated_on_stack() const { return get_allocation_type() == STACK_OR_EMBEDDED; }
+ bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
+ bool allocated_on_C_heap() const { return get_allocation_type() == C_HEAP; }
+ bool allocated_on_arena() const { return get_allocation_type() == ARENA; }
+ ResourceObj(); // default constructor
+ ResourceObj(const ResourceObj& r); // default copy constructor
+ ResourceObj& operator=(const ResourceObj& r); // default copy assignment
+ ~ResourceObj();
#endif // ASSERT
public:
void* operator new(size_t size, allocation_type type);
void* operator new(size_t size, Arena *arena) {
address res = (address)arena->Amalloc(size);
- // Set allocation type in the resource object
- DEBUG_ONLY(((ResourceObj *)res)->_allocation = ARENA;)
+ DEBUG_ONLY(set_allocation_type(res, ARENA);)
return res;
}
void* operator new(size_t size) {
address res = (address)resource_allocate_bytes(size);
- // Set allocation type in the resource object
- DEBUG_ONLY(((ResourceObj *)res)->_allocation = RESOURCE_AREA;)
- return res;
- }
- void* operator new(size_t size, void* where, allocation_type type) {
- void* res = where;
- // Set allocation type in the resource object
- DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
+ DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
return res;
}
void operator delete(void* p);
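The ResourceObj change above replaces a plain enum field with an encoding that also ties the value to the object's own address, so stack garbage that merely looks like a valid allocation_type is still caught. A minimal standalone sketch of that encoding (illustrative names, assuming addresses are at least 4-byte aligned; not HotSpot code):

    #include <cassert>
    #include <cstdint>

    enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA,
                           allocation_mask = 0x3 };

    // Value stored in the debug-only _allocation field.
    static uintptr_t encode(const void* obj, allocation_type type) {
      uintptr_t addr = (uintptr_t)obj;
      assert((addr & allocation_mask) == 0 && type <= allocation_mask);
      return ~(addr + type);
    }

    // get_allocation_type(): the low two bits of ~_allocation are the type.
    static allocation_type decode_type(uintptr_t allocation) {
      return (allocation_type)((~allocation) & allocation_mask);
    }

    // Constructor check: masking the type bits out of ~_allocation must give
    // back the object's own address, otherwise _allocation is stack garbage.
    static bool encodes_address(uintptr_t allocation, const void* obj) {
      return ~(allocation | (uintptr_t)allocation_mask) == (uintptr_t)obj;
    }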
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
friend class VMStructs;
friend class CardTableRS;
friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
+ friend class SharkBuilder;
#ifndef PRODUCT
// For debugging.
friend class GuaranteeNotModClosure;
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -941,7 +941,9 @@
VerifyBeforeExit ||
PrintAssembly ||
tty->count() != 0 || // already printing
- VerifyAfterGC, "too expensive");
+ VerifyAfterGC ||
+ VMError::fatal_error_in_progress(), "too expensive");
+
#endif
// This might be sped up with a cache of the last generation that
// answered yes.
--- a/hotspot/src/share/vm/memory/generation.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/memory/generation.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -131,7 +131,9 @@
enum SomePublicConstants {
// Generations are GenGrain-aligned and have size that are multiples of
// GenGrain.
- LogOfGenGrain = 16,
+ // Note: on ARM we add 1 bit for card_table_base to be properly aligned
+ // (we expect its low byte to be zero - see implementation of post_barrier)
+ LogOfGenGrain = 16 ARM_ONLY(+1),
GenGrain = 1 << LogOfGenGrain
};
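With the default LogOfGenGrain of 16 this keeps the generation grain at 64 KB (1 << 16 = 65536 bytes) on most platforms, while the extra ARM_ONLY bit doubles it to 128 KB (1 << 17) so that the card table base derived from it keeps a zero low byte, as the comment above requires.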
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -179,8 +179,6 @@
return JVMTI_CLASS_STATUS_ARRAY;
}
-#ifndef PRODUCT
-
// Printing
void arrayKlass::oop_print_on(oop obj, outputStream* st) {
@@ -189,8 +187,6 @@
st->print_cr(" - length: %d", arrayOop(obj)->length());
}
-#endif
-
// Verification
void arrayKlass::oop_verify_on(oop obj, outputStream* st) {
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -115,20 +115,15 @@
// Return a handle.
static void complete_create_array_klass(arrayKlassHandle k, KlassHandle super_klass, TRAPS);
- public:
- // jvm support
- jint compute_modifier_flags(TRAPS) const;
+ // jvm support
+ jint compute_modifier_flags(TRAPS) const;
- public:
- // JVMTI support
- jint jvmti_class_status() const;
+ // JVMTI support
+ jint jvmti_class_status() const;
-#ifndef PRODUCT
- public:
// Printing
void oop_print_on(oop obj, outputStream* st);
-#endif
- public:
+
// Verification
void oop_verify_on(oop obj, outputStream* st);
};
--- a/hotspot/src/share/vm/oops/arrayKlassKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlassKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -151,15 +151,12 @@
}
#endif // SERIALGC
-#ifndef PRODUCT
-
// Printing
void arrayKlassKlass::oop_print_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
klassKlass::oop_print_on(obj, st);
}
-#endif //PRODUCT
void arrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
--- a/hotspot/src/share/vm/oops/arrayKlassKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/arrayKlassKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -55,12 +55,9 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
- public:
// Printing
void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
-#endif //PRODUCT
// Verification
const char* internal_name() const;
--- a/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -154,8 +154,6 @@
}
#endif // SERIALGC
-#ifndef PRODUCT
-
// Printing
void compiledICHolderKlass::oop_print_on(oop obj, outputStream* st) {
@@ -166,8 +164,6 @@
st->print(" - klass: "); c->holder_klass()->print_value_on(st); st->cr();
}
-#endif //PRODUCT
-
void compiledICHolderKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_compiledICHolder(), "must be compiledICHolder");
Klass::oop_print_value_on(obj, st);
--- a/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -68,12 +68,9 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
- public:
// Printing
void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
-#endif //PRODUCT
// Verification
const char* internal_name() const;
--- a/hotspot/src/share/vm/oops/constMethodKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethodKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -197,8 +197,6 @@
}
#endif // SERIALGC
-#ifndef PRODUCT
-
// Printing
void constMethodKlass::oop_print_on(oop obj, outputStream* st) {
@@ -216,8 +214,6 @@
}
}
-#endif //PRODUCT
-
// Short version of printing constMethodOop - just print the name of the
// method it belongs to.
void constMethodKlass::oop_print_value_on(oop obj, outputStream* st) {
--- a/hotspot/src/share/vm/oops/constMethodKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethodKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -77,12 +77,9 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
- public:
// Printing
void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
-#endif //PRODUCT
// Verify operations
const char* internal_name() const;
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -299,8 +299,6 @@
}
#endif // SERIALGC
-#ifndef PRODUCT
-
// Printing
void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
@@ -392,8 +390,6 @@
st->cr();
}
-#endif
-
void constantPoolKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_constantPool(), "must be constantPool");
constantPoolOop cp = constantPoolOop(obj);
--- a/hotspot/src/share/vm/oops/constantPoolKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -61,18 +61,13 @@
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
// Allocation profiling support
- // no idea why this is pure virtual and not in Klass ???
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
- public:
// Printing
void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
-#endif
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/cpCacheKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -248,8 +248,6 @@
}
#endif // SERIALGC
-#ifndef PRODUCT
-
void constantPoolCacheKlass::oop_print_on(oop obj, outputStream* st) {
assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
@@ -259,8 +257,6 @@
for (int i = 0; i < cache->length(); i++) cache->entry_at(i)->print(st, i);
}
-#endif
-
void constantPoolCacheKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
--- a/hotspot/src/share/vm/oops/cpCacheKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -61,14 +61,10 @@
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
- public:
// Printing
void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
-#endif
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -2111,7 +2111,13 @@
// We do not distinguish between different types of errors for verification
// errors. Let the verifier give a better message.
const char *msg = "Illegal class file encountered. Try running with -Xverify:all";
- error_work(msg, NULL);
+ _got_error = true;
+ // Append method name
+ char msg_buffer2[512];
+ jio_snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg,
+ method()->name()->as_C_string());
+ _exception = Exceptions::new_exception(Thread::current(),
+ vmSymbols::java_lang_LinkageError(), msg_buffer2);
}
//
--- a/hotspot/src/share/vm/oops/klass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -520,8 +520,6 @@
return 0;
}
-#ifndef PRODUCT
-
// Printing
void Klass::oop_print_on(oop obj, outputStream* st) {
@@ -541,8 +539,6 @@
st->cr();
}
-#endif //PRODUCT
-
void Klass::oop_print_value_on(oop obj, outputStream* st) {
// print title
ResourceMark rm; // Cannot print in debug mode without this
--- a/hotspot/src/share/vm/oops/klass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/klass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -772,16 +772,12 @@
// jvm support
virtual jint compute_modifier_flags(TRAPS) const;
- public:
// JVMTI support
virtual jint jvmti_class_status() const;
- public:
// Printing
virtual void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
virtual void oop_print_on (oop obj, outputStream* st);
-#endif //PRODUCT
// Verification
virtual const char* internal_name() const = 0;
--- a/hotspot/src/share/vm/oops/klassKlass.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/klassKlass.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -194,16 +194,12 @@
#endif // SERIALGC
-#ifndef PRODUCT
-
// Printing
void klassKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
}
-#endif //PRODUCT
-
void klassKlass::oop_print_value_on(oop obj, outputStream* st) {
Klass::oop_print_value_on(obj, st);
}
--- a/hotspot/src/share/vm/oops/klassKlass.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/klassKlass.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -67,12 +67,9 @@
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
- public:
// Printing
void oop_print_value_on(oop obj, outputStream* st);
-#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
-#endif //PRODUCT
// Verification
const char* internal_name() const;
--- a/hotspot/src/share/vm/oops/methodOop.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodOop.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -751,10 +751,14 @@
}
OrderAccess::storestore();
+#ifdef SHARK
+ mh->_from_interpreted_entry = code->instructions_begin();
+#else
mh->_from_compiled_entry = code->verified_entry_point();
OrderAccess::storestore();
// Instantly compiled code can execute.
mh->_from_interpreted_entry = mh->get_i2c_entry();
+#endif // SHARK
}
--- a/hotspot/src/share/vm/oops/oop.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/oops/oop.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -29,15 +29,6 @@
BarrierSet* oopDesc::_bs = NULL;
-#ifdef PRODUCT
-void oopDesc::print_on(outputStream* st) const {}
-void oopDesc::print_address_on(outputStream* st) const {}
-char* oopDesc::print_string() { return NULL; }
-void oopDesc::print() {}
-void oopDesc::print_address() {}
-
-#else //PRODUCT
-
void oopDesc::print_on(outputStream* st) const {
if (this == NULL) {
st->print_cr("NULL");
@@ -62,10 +53,6 @@
return st.as_string();
}
-#endif // PRODUCT
-
-// The print_value functions are present in all builds, to support the disassembler.
-
void oopDesc::print_value() {
print_value_on(tty);
}
@@ -83,9 +70,7 @@
st->print("NULL");
} else if (java_lang_String::is_instance(obj)) {
java_lang_String::print(obj, st);
-#ifndef PRODUCT
if (PrintOopAddress) print_address_on(st);
-#endif //PRODUCT
#ifdef ASSERT
} else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
st->print("### BAD OOP %p ###", (address)obj);
--- a/hotspot/src/share/vm/opto/block.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/block.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -353,7 +353,8 @@
PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
Phase(CFG),
_bbs(a),
- _root(r)
+ _root(r),
+ _node_latency(NULL)
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
--- a/hotspot/src/share/vm/opto/block.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/block.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -374,7 +374,7 @@
float _outer_loop_freq; // Outmost loop frequency
// Per node latency estimation, valid only during GCM
- GrowableArray<uint> _node_latency;
+ GrowableArray<uint> *_node_latency;
#ifndef PRODUCT
bool _trace_opto_pipelining; // tracing flag
--- a/hotspot/src/share/vm/opto/c2_globals.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -281,6 +281,12 @@
product(bool, InsertMemBarAfterArraycopy, true, \
"Insert memory barrier after arraycopy call") \
\
+ develop(bool, SubsumeLoads, true, \
+ "Attempt to compile while subsuming loads into machine instructions.") \
+ \
+ develop(bool, StressRecompilation, false, \
+ "Recompile each compiled method without subsuming loads or escape analysis.") \
+ \
/* controls for tier 1 compilations */ \
\
develop(bool, Tier1CountInvocations, true, \
--- a/hotspot/src/share/vm/opto/c2compiler.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -103,13 +103,14 @@
if (!is_initialized()) {
initialize();
}
- bool subsume_loads = true;
+ bool subsume_loads = SubsumeLoads;
bool do_escape_analysis = DoEscapeAnalysis &&
!env->jvmti_can_access_local_variables();
while (!env->failing()) {
// Attempt to compile while subsuming loads into machine instructions.
Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
+
// Check result and retry if appropriate.
if (C.failure_reason() != NULL) {
if (C.failure_reason_is(retry_no_subsuming_loads())) {
@@ -127,6 +128,16 @@
// on the ciEnv via env->record_method_not_compilable().
env->record_failure(C.failure_reason());
}
+ if (StressRecompilation) {
+ if (subsume_loads) {
+ subsume_loads = false;
+ continue; // retry
+ }
+ if (do_escape_analysis) {
+ do_escape_analysis = false;
+ continue; // retry
+ }
+ }
// No retry; just break the loop.
break;
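Both new flags are develop() flags, so they are compile-time constants in product builds. In a debug build, -XX:+StressRecompilation makes the loop above retry each compile first with subsume_loads off and then with do_escape_analysis off, and -XX:-SubsumeLoads starts every compile with load subsuming disabled, presumably to exercise the retry paths that normally run only after a bailout.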
--- a/hotspot/src/share/vm/opto/chaitin.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/chaitin.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -569,7 +569,7 @@
if (trace_spilling() && lrg._def != NULL) {
// collect defs for MultiDef printing
if (lrg._defs == NULL) {
- lrg._defs = new (_ifg->_arena) GrowableArray<Node*>();
+ lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
lrg._defs->append(lrg._def);
}
lrg._defs->append(n);
--- a/hotspot/src/share/vm/opto/compile.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -904,8 +904,8 @@
probe_alias_cache(NULL)->_index = AliasIdxTop;
_intrinsics = NULL;
- _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
- _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+ _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+ _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
register_library_intrinsics();
}
--- a/hotspot/src/share/vm/opto/gcm.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/gcm.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -841,7 +841,7 @@
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
- n->_idx, _node_latency.at_grow(n->_idx));
+ n->_idx, _node_latency->at_grow(n->_idx));
dump();
}
#endif
@@ -853,7 +853,7 @@
return;
uint nlen = n->len();
- uint use_latency = _node_latency.at_grow(n->_idx);
+ uint use_latency = _node_latency->at_grow(n->_idx);
uint use_pre_order = _bbs[n->_idx]->_pre_order;
for ( uint j=0; j<nlen; j++ ) {
@@ -884,15 +884,15 @@
uint delta_latency = n->latency(j);
uint current_latency = delta_latency + use_latency;
- if (_node_latency.at_grow(def->_idx) < current_latency) {
- _node_latency.at_put_grow(def->_idx, current_latency);
+ if (_node_latency->at_grow(def->_idx) < current_latency) {
+ _node_latency->at_put_grow(def->_idx, current_latency);
}
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
use_latency, j, delta_latency, current_latency, def->_idx,
- _node_latency.at_grow(def->_idx));
+ _node_latency->at_grow(def->_idx));
}
#endif
}
@@ -926,7 +926,7 @@
return 0;
uint nlen = use->len();
- uint nl = _node_latency.at_grow(use->_idx);
+ uint nl = _node_latency->at_grow(use->_idx);
for ( uint j=0; j<nlen; j++ ) {
if (use->in(j) == n) {
@@ -962,7 +962,7 @@
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
- n->_idx, _node_latency.at_grow(n->_idx));
+ n->_idx, _node_latency->at_grow(n->_idx));
dump();
}
#endif
@@ -975,7 +975,7 @@
if (latency < l) latency = l;
}
- _node_latency.at_put_grow(n->_idx, latency);
+ _node_latency->at_put_grow(n->_idx, latency);
}
//------------------------------hoist_to_cheaper_block-------------------------
@@ -985,9 +985,9 @@
const double delta = 1+PROB_UNLIKELY_MAG(4);
Block* least = LCA;
double least_freq = least->_freq;
- uint target = _node_latency.at_grow(self->_idx);
- uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
- uint end_latency = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
+ uint target = _node_latency->at_grow(self->_idx);
+ uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
+ uint end_latency = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
bool in_latency = (target <= start_latency);
const Block* root_block = _bbs[_root->_idx];
@@ -1005,7 +1005,7 @@
#ifndef PRODUCT
if (trace_opto_pipelining()) {
tty->print("# Find cheaper block for latency %d: ",
- _node_latency.at_grow(self->_idx));
+ _node_latency->at_grow(self->_idx));
self->dump();
tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
LCA->_pre_order,
@@ -1032,9 +1032,9 @@
if (mach && LCA == root_block)
break;
- uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
+ uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
uint end_idx = LCA->end_idx();
- uint end_lat = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
+ uint end_lat = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
double LCA_freq = LCA->_freq;
#ifndef PRODUCT
if (trace_opto_pipelining()) {
@@ -1073,7 +1073,7 @@
tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
}
#endif
- _node_latency.at_put_grow(self->_idx, end_latency);
+ _node_latency->at_put_grow(self->_idx, end_latency);
partial_latency_of_defs(self);
}
@@ -1255,8 +1255,7 @@
// Compute the latency information (via backwards walk) for all the
// instructions in the graph
- GrowableArray<uint> node_latency;
- _node_latency = node_latency;
+ _node_latency = new GrowableArray<uint>(); // resource_area allocation
if( C->do_scheduling() )
ComputeLatenciesBackwards(visited, stack);
@@ -1341,6 +1340,8 @@
}
}
#endif
+ // Dead.
+ _node_latency = (GrowableArray<uint> *)0xdeadbeef;
}
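The latency table is now a GrowableArray allocated in the resource area (its default operator new, per the ResourceObj changes earlier in this changeset), so it only lives until the enclosing ResourceMark is released; the 0xdeadbeef store makes any later dereference fail loudly instead of silently reading freed memory. A rough sketch of the intended lifetime, assuming a ResourceMark covers the scheduling pass (names as used above; not the actual PhaseCFG code):

    {
      ResourceMark rm;                               // owns the allocation below
      _node_latency = new GrowableArray<uint>();     // resource-area allocation
      ComputeLatenciesBackwards(visited, stack);     // fills the table
      // ... scheduling reads it via _node_latency->at_grow(idx) ...
    }
    // Past this point the backing memory is gone; poison the pointer.
    _node_latency = (GrowableArray<uint>*)0xdeadbeef;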
--- a/hotspot/src/share/vm/opto/lcm.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/lcm.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -113,7 +113,8 @@
if( !m->is_Mach() ) continue;
MachNode *mach = m->as_Mach();
was_store = false;
- switch( mach->ideal_Opcode() ) {
+ int iop = mach->ideal_Opcode();
+ switch( iop ) {
case Op_LoadB:
case Op_LoadUS:
case Op_LoadD:
@@ -155,6 +156,12 @@
default: // Also check for embedded loads
if( !mach->needs_anti_dependence_check() )
continue; // Not an memory op; skip it
+ if( must_clone[iop] ) {
+ // Do not move nodes which produce flags, because
+ // RA will try to clone them to place near the branch and
+ // that would cause recompilation; see clone_node().
+ continue;
+ }
{
// Check that value is used in memory address in
// instructions with embedded load (CmpP val1,(val2+off)).
@@ -461,7 +468,7 @@
n_choice = 1;
}
- uint n_latency = cfg->_node_latency.at_grow(n->_idx);
+ uint n_latency = cfg->_node_latency->at_grow(n->_idx);
uint n_score = n->req(); // Many inputs get high score to break ties
// Keep best latency found
@@ -738,7 +745,7 @@
Node *n = _nodes[j];
int idx = n->_idx;
tty->print("# ready cnt:%3d ", ready_cnt[idx]);
- tty->print("latency:%3d ", cfg->_node_latency.at_grow(idx));
+ tty->print("latency:%3d ", cfg->_node_latency->at_grow(idx));
tty->print("%4d: %s\n", idx, n->Name());
}
}
@@ -765,7 +772,7 @@
#ifndef PRODUCT
if (cfg->trace_opto_pipelining()) {
tty->print("# select %d: %s", n->_idx, n->Name());
- tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
+ tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
n->dump();
if (Verbose) {
tty->print("# ready list:");
@@ -957,6 +964,8 @@
Block *sb = _succs[i];
// Clone the entire area; ignoring the edge fixup for now.
for( uint j = end; j > beg; j-- ) {
+ // It is safe here to clone a node with anti_dependence
+ // since clones dominate on each path.
Node *clone = _nodes[j-1]->clone();
sb->_nodes.insert( 1, clone );
bbs.map(clone->_idx,sb);
--- a/hotspot/src/share/vm/opto/macro.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -720,7 +720,7 @@
if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
if (!elem_type->is_loaded()) {
field_type = TypeInstPtr::BOTTOM;
- } else if (field != NULL && field->is_constant()) {
+ } else if (field != NULL && field->is_constant() && field->is_static()) {
// This can happen if the constant oop is non-perm.
ciObject* con = field->constant_value().as_object();
// Do not "join" in the previous type; it doesn't add value,
--- a/hotspot/src/share/vm/opto/output.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/output.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -382,6 +382,10 @@
if (min_offset_from_last_call == 0) {
blk_size += nop_size;
}
+ } else if (mach->ideal_Opcode() == Op_Jump) {
+ const_size += b->_num_succs; // Address table size
+ // The size is valid even for 64 bit since it is
+ // multiplied by 2*jintSize when this method exits.
}
}
min_offset_from_last_call += inst_size;
--- a/hotspot/src/share/vm/opto/reg_split.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/opto/reg_split.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -271,6 +271,32 @@
return maxlrg;
}
+//------------------------------clone_node----------------------------
+// Clone node with anti dependence check.
+Node* clone_node(Node* def, Block *b, Compile* C) {
+ if (def->needs_anti_dependence_check()) {
+#ifdef ASSERT
+ if (Verbose) {
+ tty->print_cr("RA attempts to clone node with anti_dependence:");
+ def->dump(-1); tty->cr();
+ tty->print_cr("into block:");
+ b->dump();
+ }
+#endif
+ if (C->subsume_loads() == true && !C->failing()) {
+ // Retry with subsume_loads == false
+ // If this is the first failure, the sentinel string will "stick"
+ // to the Compile object, and the C2Compiler will see it and retry.
+ C->record_failure(C2Compiler::retry_no_subsuming_loads());
+ } else {
+ // Bailout without retry
+ C->record_method_not_compilable("RA Split failed: attempt to clone node with anti_dependence");
+ }
+ return 0;
+ }
+ return def->clone();
+}
+
//------------------------------split_Rematerialize----------------------------
// Clone a local copy of the def.
Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
@@ -298,8 +324,8 @@
}
}
- Node *spill = def->clone();
- if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
+ Node *spill = clone_node(def, b, C);
+ if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
// Check when generating nodes
return 0;
}
@@ -834,13 +860,13 @@
// The effect of this clone is to drop the node out of the block,
// so that the allocator does not see it anymore, and therefore
// does not attempt to assign it a register.
- def = def->clone();
+ def = clone_node(def, b, C);
+ if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
+ return 0;
+ }
_names.extend(def->_idx,0);
_cfg._bbs.map(def->_idx,b);
n->set_req(inpidx, def);
- if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
- return 0;
- }
continue;
}
--- a/hotspot/src/share/vm/prims/jni.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -3414,6 +3414,8 @@
thread->initialize_tlab();
+ thread->cache_global_variables();
+
// Crucial that we do not have a safepoint check for this thread, since it has
// not been added to the Thread list yet.
{ Threads_lock->lock_without_safepoint_check();
--- a/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvmtiEnvThreadState.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -24,6 +24,8 @@
#ifndef _JAVA_JVMTIENVTHREADSTATE_H_
#define _JAVA_JVMTIENVTHREADSTATE_H_
+class JvmtiEnv;
+
///////////////////////////////////////////////////////////////
//
// class JvmtiFramePop
--- a/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -2593,6 +2593,12 @@
FLAG_IS_DEFAULT(UseVMInterruptibleIO)) {
FLAG_SET_DEFAULT(UseVMInterruptibleIO, true);
}
+#ifdef LINUX
+ if (JDK_Version::current().compare_major(6) <= 0 &&
+ FLAG_IS_DEFAULT(UseLinuxPosixThreadCPUClocks)) {
+ FLAG_SET_DEFAULT(UseLinuxPosixThreadCPUClocks, false);
+ }
+#endif // LINUX
return JNI_OK;
}
@@ -2659,6 +2665,28 @@
}
#endif
+ // If we are running in a headless jre, force java.awt.headless property
+ // to be true unless the property has already been set.
+ // Also allow the OS environment variable JAVA_AWT_HEADLESS to set headless state.
+ if (os::is_headless_jre()) {
+ const char* headless = Arguments::get_property("java.awt.headless");
+ if (headless == NULL) {
+ char envbuffer[128];
+ if (!os::getenv("JAVA_AWT_HEADLESS", envbuffer, sizeof(envbuffer))) {
+ if (!add_property("java.awt.headless=true")) {
+ return JNI_ENOMEM;
+ }
+ } else {
+ char buffer[256];
+ strcpy(buffer, "java.awt.headless=");
+ strcat(buffer, envbuffer);
+ if (!add_property(buffer)) {
+ return JNI_ENOMEM;
+ }
+ }
+ }
+ }
+
if (!check_vm_args_consistency()) {
return JNI_ERR;
}
@@ -2979,6 +3007,14 @@
CommandLineFlags::printFlags();
}
+ // Apply CPU-specific policy for BiasedLocking.
+ if (UseBiasedLocking) {
+ if (!VM_Version::use_biased_locking() &&
+ !(FLAG_IS_CMDLINE(UseBiasedLocking))) {
+ UseBiasedLocking = false;
+ }
+ }
+
return JNI_OK;
}
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -254,6 +254,7 @@
}
+#ifndef SHARK
// Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
CodeBlob* cb = stub_frame.cb();
// Verify we have the right vframeArray
@@ -270,6 +271,10 @@
assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
#endif
+#else
+ intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
+#endif // !SHARK
+
// This is a guarantee instead of an assert because if vframe doesn't match
// we will unpack the wrong deoptimized frame and wind up in strange places
// where it will be very difficult to figure out what went wrong. Better
@@ -380,7 +385,9 @@
frame_pcs[0] = deopt_sender.raw_pc();
+#ifndef SHARK
assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
+#endif // SHARK
UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
caller_adjustment * BytesPerWord,
@@ -1073,7 +1080,7 @@
JRT_END
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(SHARK)
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
// in case of an unresolved klass entry, load the class.
if (constant_pool->tag_at(index).is_unresolved_klass()) {
@@ -1835,7 +1842,7 @@
if (xtty != NULL) xtty->tail("statistics");
}
}
-#else // COMPILER2
+#else // COMPILER2 || SHARK
// Stubs for C1 only system.
@@ -1871,4 +1878,4 @@
return buf;
}
-#endif // COMPILER2
+#endif // COMPILER2 || SHARK
--- a/hotspot/src/share/vm/runtime/frame.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/frame.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -876,6 +876,7 @@
#endif /* CC_INTERP */
+#ifndef PPC
if (m->is_native()) {
#ifdef CC_INTERP
f->do_oop((oop*)&istate->_oop_temp);
@@ -883,6 +884,11 @@
f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
#endif /* CC_INTERP */
}
+#else // PPC
+ if (m->is_native() && m->is_static()) {
+ f->do_oop(interpreter_frame_mirror_addr());
+ }
+#endif // PPC
int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
@@ -1094,6 +1100,10 @@
oops_entry_do(f, map);
} else if (CodeCache::contains(pc())) {
oops_code_blob_do(f, cf, map);
+#ifdef SHARK
+ } else if (is_fake_stub_frame()) {
+ // nothing to do
+#endif // SHARK
} else {
ShouldNotReachHere();
}
--- a/hotspot/src/share/vm/runtime/frame.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/frame.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -25,6 +25,7 @@
typedef class BytecodeInterpreter* interpreterState;
class CodeBlob;
+class vframeArray;
// A frame represents a physical stack frame (an activation). Frames
@@ -296,6 +297,9 @@
void interpreter_frame_set_method(methodOop method);
methodOop* interpreter_frame_method_addr() const;
constantPoolCacheOop* interpreter_frame_cache_addr() const;
+#ifdef PPC
+ oop* interpreter_frame_mirror_addr() const;
+#endif
public:
// Entry frames
--- a/hotspot/src/share/vm/runtime/globals.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -181,6 +181,18 @@
#define C2_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 notproduct}", DEFAULT },
#endif
+#define SHARK_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark product}", DEFAULT },
+#define SHARK_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{Shark pd product}", DEFAULT },
+#define SHARK_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark diagnostic}", DEFAULT },
+#ifdef PRODUCT
+ #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+ #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
+ #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc)
+#else
+ #define SHARK_DEVELOP_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark}", DEFAULT },
+ #define SHARK_PD_DEVELOP_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, "{Shark pd}", DEFAULT },
+ #define SHARK_NOTPRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{Shark notproduct}", DEFAULT },
+#endif
static Flag flagTable[] = {
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT, RUNTIME_LP64_PRODUCT_FLAG_STRUCT)
@@ -194,6 +206,9 @@
#ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
#endif
+#ifdef SHARK
+ SHARK_FLAGS(SHARK_DEVELOP_FLAG_STRUCT, SHARK_PD_DEVELOP_FLAG_STRUCT, SHARK_PRODUCT_FLAG_STRUCT, SHARK_PD_PRODUCT_FLAG_STRUCT, SHARK_DIAGNOSTIC_FLAG_STRUCT, SHARK_NOTPRODUCT_FLAG_STRUCT)
+#endif
{0, NULL, NULL}
};
--- a/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -22,7 +22,7 @@
*
*/
-#if !defined(COMPILER1) && !defined(COMPILER2)
+#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK)
define_pd_global(bool, BackgroundCompilation, false);
define_pd_global(bool, UseTLAB, false);
define_pd_global(bool, CICompileOSR, false);
@@ -607,7 +607,7 @@
notproduct(bool, PrintMallocFree, false, \
"Trace calls to C heap malloc/free allocation") \
\
- notproduct(bool, PrintOopAddress, false, \
+ product(bool, PrintOopAddress, false, \
"Always print the location of the oop") \
\
notproduct(bool, VerifyCodeCacheOften, false, \
@@ -2442,6 +2442,10 @@
"Call fatal if this exception is thrown. Example: " \
"java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
\
+ notproduct(ccstr, AbortVMOnExceptionMessage, NULL, \
+ "Call fatal if the exception pointed by AbortVMOnException " \
+ "has this message.") \
+ \
develop(bool, DebugVtables, false, \
"add debugging code to vtable dispatch") \
\
@@ -3554,7 +3558,6 @@
"EINTR for I/O operations results in OS_INTRPT. The default value"\
" of this flag is true for JDK 6 and earliers")
-
/*
* Macros for factoring of globals
*/
--- a/hotspot/src/share/vm/runtime/java.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/java.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -378,7 +378,8 @@
}
// Terminate watcher thread - must before disenrolling any periodic task
- WatcherThread::stop();
+ if (PeriodicTask::num_tasks() > 0)
+ WatcherThread::stop();
// Print statistics gathered (profiling ...)
if (Arguments::has_profile()) {
--- a/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -76,7 +76,6 @@
JavaFrameAnchor() { clear(); }
JavaFrameAnchor(JavaFrameAnchor *src) { copy(src); }
- address last_Java_pc(void) { return _last_Java_pc; }
void set_last_Java_pc(address pc) { _last_Java_pc = pc; }
// Assembly stub generation helpers
--- a/hotspot/src/share/vm/runtime/os.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -735,6 +735,152 @@
st->print_cr("elapsed time: %d seconds", (int)t);
}
+// Moved from debug.cpp (used to be find()) but still called from there.
+// The print_pc parameter is only set by the debug code in one case.
+void os::print_location(outputStream* st, intptr_t x, bool print_pc) {
+ address addr = (address)x;
+ CodeBlob* b = CodeCache::find_blob_unsafe(addr);
+ if (b != NULL) {
+ if (b->is_buffer_blob()) {
+ // the interpreter is generated into a buffer blob
+ InterpreterCodelet* i = Interpreter::codelet_containing(addr);
+ if (i != NULL) {
+ i->print_on(st);
+ return;
+ }
+ if (Interpreter::contains(addr)) {
+ st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
+ " (not bytecode specific)", addr);
+ return;
+ }
+ //
+ if (AdapterHandlerLibrary::contains(b)) {
+ st->print_cr("Printing AdapterHandler");
+ AdapterHandlerLibrary::print_handler_on(st, b);
+ }
+ // the stubroutines are generated into a buffer blob
+ StubCodeDesc* d = StubCodeDesc::desc_for(addr);
+ if (d != NULL) {
+ d->print_on(st);
+ if (print_pc) st->cr();
+ return;
+ }
+ if (StubRoutines::contains(addr)) {
+ st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) "
+ "stub routine", addr);
+ return;
+ }
+ // the InlineCacheBuffer is using stubs generated into a buffer blob
+ if (InlineCacheBuffer::contains(addr)) {
+ st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr);
+ return;
+ }
+ VtableStub* v = VtableStubs::stub_containing(addr);
+ if (v != NULL) {
+ v->print_on(st);
+ return;
+ }
+ }
+ if (print_pc && b->is_nmethod()) {
+ ResourceMark rm;
+ st->print("%#p: Compiled ", addr);
+ ((nmethod*)b)->method()->print_value_on(st);
+ st->print(" = (CodeBlob*)" INTPTR_FORMAT, b);
+ st->cr();
+ return;
+ }
+ if ( b->is_nmethod()) {
+ if (b->is_zombie()) {
+ st->print_cr(INTPTR_FORMAT " is zombie nmethod", b);
+ } else if (b->is_not_entrant()) {
+ st->print_cr(INTPTR_FORMAT " is non-entrant nmethod", b);
+ }
+ }
+ b->print_on(st);
+ return;
+ }
+
+ if (Universe::heap()->is_in(addr)) {
+ HeapWord* p = Universe::heap()->block_start(addr);
+ bool print = false;
+ // If we couldn't find it, it may just mean that the heap wasn't parseable.
+ // See if we were just given an oop directly
+ if (p != NULL && Universe::heap()->block_is_obj(p)) {
+ print = true;
+ } else if (p == NULL && ((oopDesc*)addr)->is_oop()) {
+ p = (HeapWord*) addr;
+ print = true;
+ }
+ if (print) {
+ oop(p)->print_on(st);
+ if (p != (HeapWord*)x && oop(p)->is_constMethod() &&
+ constMethodOop(p)->contains(addr)) {
+ Thread *thread = Thread::current();
+ HandleMark hm(thread);
+ methodHandle mh (thread, constMethodOop(p)->method());
+ if (!mh->is_native()) {
+ st->print_cr("bci_from(%p) = %d; print_codes():",
+ addr, mh->bci_from(address(x)));
+ mh->print_codes_on(st);
+ }
+ }
+ return;
+ }
+ } else {
+ if (Universe::heap()->is_in_reserved(addr)) {
+ st->print_cr(INTPTR_FORMAT " is an unallocated location "
+ "in the heap", addr);
+ return;
+ }
+ }
+ if (JNIHandles::is_global_handle((jobject) addr)) {
+ st->print_cr(INTPTR_FORMAT " is a global jni handle", addr);
+ return;
+ }
+ if (JNIHandles::is_weak_global_handle((jobject) addr)) {
+ st->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr);
+ return;
+ }
+#ifndef PRODUCT
+ // we don't keep the block list in product mode
+ if (JNIHandleBlock::any_contains((jobject) addr)) {
+ st->print_cr(INTPTR_FORMAT " is a local jni handle", addr);
+ return;
+ }
+#endif
+
+ for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+ // Check for privilege stack
+ if (thread->privileged_stack_top() != NULL &&
+ thread->privileged_stack_top()->contains(addr)) {
+ st->print_cr(INTPTR_FORMAT " is pointing into the privilege stack "
+ "for thread: " INTPTR_FORMAT, addr, thread);
+ thread->print_on(st);
+ return;
+ }
+ // If the addr is a java thread print information about that.
+ if (addr == (address)thread) {
+ thread->print_on(st);
+ return;
+ }
+ // If the addr is in the stack region for this thread then report that
+ // and print thread info
+ if (thread->stack_base() >= addr &&
+ addr > (thread->stack_base() - thread->stack_size())) {
+ st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
+ INTPTR_FORMAT, addr, thread);
+ thread->print_on(st);
+ return;
+ }
+
+ }
+ // Try an OS specific find
+ if (os::find(addr, st)) {
+ return;
+ }
+
+ st->print_cr(INTPTR_FORMAT " is pointing to unknown location", addr);
+}
// Looks like all platforms except IA64 can use the same function to check
// if C stack is walkable beyond current frame. The check for fp() is not
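print_location is essentially the old debug.cpp find() logic made reusable from the fatal error handler: it walks the code cache, interpreter and stub code, the heap, JNI handles and thread stacks until something claims the address. A hedged usage sketch (suspect and pc are hypothetical variables; the signature is the one declared in os.hpp below):

    os::print_location(st, (intptr_t)suspect);                // describe an arbitrary address
    os::print_location(st, (intptr_t)pc, true /*print_pc*/);  // also format it as a code address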
--- a/hotspot/src/share/vm/runtime/os.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,8 +78,10 @@
}
public:
- static void init(void); // Called before command line parsing
- static jint init_2(void); // Called after command line parsing
+
+ static void init(void); // Called before command line parsing
+ static jint init_2(void); // Called after command line parsing
+ static void init_3(void); // Called at the end of vm init
// File names are case-insensitive on windows only
// Override me as needed
@@ -322,7 +324,8 @@
pgc_thread, // Parallel GC thread
java_thread,
compiler_thread,
- watcher_thread
+ watcher_thread,
+ os_thread
};
static bool create_thread(Thread* thread,
@@ -451,6 +454,8 @@
static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
static void print_date_and_time(outputStream* st);
+ static void print_location(outputStream* st, intptr_t x, bool print_pc = false);
+
// The following two functions are used by fatal error handler to trace
// native (C) frames. They are not part of frame.hpp/frame.cpp because
// frame.hpp/cpp assume thread is JavaThread, and also because different
@@ -480,6 +485,9 @@
// Fills in path to jvm.dll/libjvm.so (this info used to find hpi).
static void jvm_path(char *buf, jint buflen);
+ // Returns true if we are running in a headless jre.
+ static bool is_headless_jre();
+
// JNI names
static void print_jni_name_prefix_on(outputStream* st, int args_size);
static void print_jni_name_suffix_on(outputStream* st, int args_size);
@@ -580,8 +588,8 @@
// Platform dependent stuff
#include "incls/_os_pd.hpp.incl"
- // debugging support (mostly used by debug.cpp)
- static bool find(address pc) PRODUCT_RETURN0; // OS specific function to make sense out of an address
+ // debugging support (mostly used by debug.cpp but also by the fatal error handler)
+ static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
static bool dont_yield(); // when true, JVM_Yield() is nop
static void print_statistics();
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -191,6 +191,121 @@
return ((jdouble)fmod((double)x,(double)y));
JRT_END
+#ifdef __SOFTFP__
+JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
+ return x + y;
+JRT_END
+
+JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
+ return x - y;
+JRT_END
+
+JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
+ return x * y;
+JRT_END
+
+JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
+ return x / y;
+JRT_END
+
+JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
+ return x + y;
+JRT_END
+
+JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
+ return x - y;
+JRT_END
+
+JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
+ return x * y;
+JRT_END
+
+JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
+ return x / y;
+JRT_END
+
+JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
+ return (jfloat)x;
+JRT_END
+
+JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
+ return (jdouble)x;
+JRT_END
+
+JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
+ return (jdouble)x;
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
+ return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
+ return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
+ return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
+ return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
+JRT_END
+
+// Functions to return the opposite of the aeabi functions for NaN.
+JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
+ return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
+ return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
+ return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
+ return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
+ return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
+ return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
+ return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
+ return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
+JRT_END
+
+// Intrinsics make gcc generate code for these.
+float SharedRuntime::fneg(float f) {
+ return -f;
+}
+
+double SharedRuntime::dneg(double f) {
+ return -f;
+}
+
+#endif // __SOFTFP__
+
+#if defined(__SOFTFP__) || defined(E500V2)
+// Intrinsics make gcc generate code for these.
+double SharedRuntime::dabs(double f) {
+ return (f <= (double)0.0) ? (double)0.0 - f : f;
+}
+
+double SharedRuntime::dsqrt(double f) {
+ return sqrt(f);
+}
+#endif
JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
if (g_isnan(x))
@@ -2046,6 +2161,8 @@
int AdapterHandlerTable::_hits;
int AdapterHandlerTable::_compact;
+#endif
+
class AdapterHandlerTableIterator : public StackObj {
private:
AdapterHandlerTable* _table;
@@ -2081,7 +2198,6 @@
}
}
};
-#endif
// ---------------------------------------------------------------------------
@@ -2619,7 +2735,6 @@
FREE_C_HEAP_ARRAY(intptr_t,buf);
JRT_END
-#ifndef PRODUCT
bool AdapterHandlerLibrary::contains(CodeBlob* b) {
AdapterHandlerTableIterator iter(_adapters);
while (iter.has_next()) {
@@ -2629,21 +2744,24 @@
return false;
}
-void AdapterHandlerLibrary::print_handler(CodeBlob* b) {
+void AdapterHandlerLibrary::print_handler_on(outputStream* st, CodeBlob* b) {
AdapterHandlerTableIterator iter(_adapters);
while (iter.has_next()) {
AdapterHandlerEntry* a = iter.next();
if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) {
- tty->print("Adapter for signature: ");
- tty->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
- a->fingerprint()->as_string(),
- a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
+ st->print("Adapter for signature: ");
+ st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
+ a->fingerprint()->as_string(),
+ a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
+
return;
}
}
assert(false, "Should have found handler");
}
+#ifndef PRODUCT
+
void AdapterHandlerLibrary::print_statistics() {
_adapters->print_statistics();
}
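The fcmpl/fcmpg (and dcmpl/dcmpg) helpers added above differ only in the value they return when either operand is NaN, mirroring the like-named bytecodes: the 'l' form treats an unordered result as less-than, the 'g' form as greater-than. A quick check against the definitions:

    // fcmpl(NaN, 1.0f): NaN > 1.0f is false and NaN == 1.0f is false  -> -1
    // fcmpg(NaN, 1.0f): NaN < 1.0f is false and NaN == 1.0f is false  -> +1
    // For ordered operands both agree, e.g. fcmpl(2.0f, 1.0f) == fcmpg(2.0f, 1.0f) == 1.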
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -78,6 +78,18 @@
static jfloat frem(jfloat x, jfloat y);
static jdouble drem(jdouble x, jdouble y);
+#ifdef __SOFTFP__
+ static jfloat fadd(jfloat x, jfloat y);
+ static jfloat fsub(jfloat x, jfloat y);
+ static jfloat fmul(jfloat x, jfloat y);
+ static jfloat fdiv(jfloat x, jfloat y);
+
+ static jdouble dadd(jdouble x, jdouble y);
+ static jdouble dsub(jdouble x, jdouble y);
+ static jdouble dmul(jdouble x, jdouble y);
+ static jdouble ddiv(jdouble x, jdouble y);
+#endif // __SOFTFP__
+
// float conversion (needs to set appropriate rounding mode)
static jint f2i (jfloat x);
static jlong f2l (jfloat x);
@@ -87,6 +99,12 @@
static jfloat l2f (jlong x);
static jdouble l2d (jlong x);
+#ifdef __SOFTFP__
+ static jfloat i2f (jint x);
+ static jdouble i2d (jint x);
+ static jdouble f2d (jfloat x);
+#endif // __SOFTFP__
+
// double trigonometrics and transcendentals
static jdouble dsin(jdouble x);
static jdouble dcos(jdouble x);
@@ -96,6 +114,32 @@
static jdouble dexp(jdouble x);
static jdouble dpow(jdouble x, jdouble y);
+#if defined(__SOFTFP__) || defined(E500V2)
+ static double dabs(double f);
+ static double dsqrt(double f);
+#endif
+
+#ifdef __SOFTFP__
+ // The C++ compiler generates soft-float instructions as well as passing
+ // float and double in registers.
+ static int fcmpl(float x, float y);
+ static int fcmpg(float x, float y);
+ static int dcmpl(double x, double y);
+ static int dcmpg(double x, double y);
+
+ static int unordered_fcmplt(float x, float y);
+ static int unordered_dcmplt(double x, double y);
+ static int unordered_fcmple(float x, float y);
+ static int unordered_dcmple(double x, double y);
+ static int unordered_fcmpge(float x, float y);
+ static int unordered_dcmpge(double x, double y);
+ static int unordered_fcmpgt(float x, float y);
+ static int unordered_dcmpgt(double x, double y);
+
+ static float fneg(float f);
+ static double dneg(double f);
+#endif
+
// exception handling across interpreter/compiler boundaries
static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address);
static address exception_handler_for_return_address(JavaThread* thread, address return_address);
@@ -585,9 +629,7 @@
bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
#endif
-#ifndef PRODUCT
void print();
-#endif /* PRODUCT */
};
class AdapterHandlerLibrary: public AllStatic {
@@ -609,9 +651,10 @@
static nmethod* create_dtrace_nmethod (methodHandle method);
#endif // HAVE_DTRACE_H
+ static void print_handler(CodeBlob* b) { print_handler_on(tty, b); }
+ static void print_handler_on(outputStream* st, CodeBlob* b);
+ static bool contains(CodeBlob* b);
#ifndef PRODUCT
- static void print_handler(CodeBlob* b);
- static bool contains(CodeBlob* b);
static void print_statistics();
#endif /* PRODUCT */
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -572,7 +572,11 @@
if(hy<0) z = one/z; /* z = (1/|x|) */
if(hx<0) {
if(((ix-0x3ff00000)|yisint)==0) {
+#ifdef CAN_USE_NAN_DEFINE
+ z = NAN;
+#else
z = (z-z)/(z-z); /* (-1)**non-int is NaN */
+#endif
} else if(yisint==1)
z = -1.0*z; /* (x<0)**odd = -(|x|**odd) */
}
@@ -583,7 +587,12 @@
n = (hx>>31)+1;
/* (x<0)**(non-int) is NaN */
- if((n|yisint)==0) return (x-x)/(x-x);
+ if((n|yisint)==0)
+#ifdef CAN_USE_NAN_DEFINE
+ return NAN;
+#else
+ return (x-x)/(x-x);
+#endif
s = one; /* s (sign of result -ve**odd) = -1 else = 1 */
if((n|(yisint-1))==0) s = -one;/* (-ve)**(odd int) */
--- a/hotspot/src/share/vm/runtime/signature.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/signature.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -275,11 +275,7 @@
void do_bool () { pass_int(); _jni_offset++; _offset++; }
void do_char () { pass_int(); _jni_offset++; _offset++; }
-#if defined(_LP64) || defined(ZERO)
void do_float () { pass_float(); _jni_offset++; _offset++; }
-#else
- void do_float () { pass_int(); _jni_offset++; _offset++; }
-#endif
#ifdef _LP64
void do_double() { pass_double(); _jni_offset++; _offset += 2; }
#else
@@ -306,9 +302,7 @@
virtual void pass_int() = 0;
virtual void pass_long() = 0;
virtual void pass_object() = 0;
-#if defined(_LP64) || defined(ZERO)
virtual void pass_float() = 0;
-#endif
#ifdef _LP64
virtual void pass_double() = 0;
#else
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -53,15 +53,13 @@
}
-void StubCodeDesc::print() {
- tty->print(group());
- tty->print("::");
- tty->print(name());
- tty->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT "[ (%d bytes)", begin(), end(), size_in_bytes());
+void StubCodeDesc::print_on(outputStream* st) const {
+ st->print(group());
+ st->print("::");
+ st->print(name());
+ st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT "[ (%d bytes)", begin(), end(), size_in_bytes());
}
-
-
// Implementation of StubCodeGenerator
StubCodeGenerator::StubCodeGenerator(CodeBuffer* code) {
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -79,7 +79,8 @@
address end() const { return _end; }
int size_in_bytes() const { return _end - _begin; }
bool contains(address pc) const { return _begin <= pc && pc < _end; }
- void print();
+ void print_on(outputStream* st) const;
+ void print() const { print_on(tty); }
};
// The base class for all stub-generating code generators.
--- a/hotspot/src/share/vm/runtime/thread.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -807,7 +807,7 @@
// should be revisited, and they should be removed if possible.
bool Thread::is_lock_owned(address adr) const {
- return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
+ return on_local_stack(adr);
}
bool Thread::set_as_starting_thread() {
@@ -1020,7 +1020,7 @@
// timer interrupts exists on the platform.
WatcherThread* WatcherThread::_watcher_thread = NULL;
-bool WatcherThread::_should_terminate = false;
+volatile bool WatcherThread::_should_terminate = false;
WatcherThread::WatcherThread() : Thread() {
assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
@@ -1052,8 +1052,26 @@
// Calculate how long it'll be until the next PeriodicTask work
// should be done, and sleep that amount of time.
- const size_t time_to_wait = PeriodicTask::time_to_wait();
- os::sleep(this, time_to_wait, false);
+ jlong time_to_wait = PeriodicTask::time_to_wait();
+
+ // we expect this to timeout - we only ever get unparked when
+ // we should terminate
+ {
+ OSThreadWaitState osts(this->osthread(), false /* not Object.wait() */);
+
+ jlong prev_time = os::javaTimeNanos();
+ for (;;) {
+ int res = _SleepEvent->park(time_to_wait);
+ if (res == OS_TIMEOUT || _should_terminate)
+ break;
+ // spurious wakeup of some kind
+ jlong now = os::javaTimeNanos();
+ time_to_wait -= (now - prev_time) / 1000000;
+ if (time_to_wait <= 0)
+ break;
+ prev_time = now;
+ }
+ }
if (is_error_reported()) {
// A fatal error has happened, the error handler(VMError::report_and_die)
@@ -1115,6 +1133,12 @@
// it is ok to take late safepoints here, if needed
MutexLocker mu(Terminator_lock);
_should_terminate = true;
+ OrderAccess::fence(); // ensure WatcherThread sees update in main loop
+
+ Thread* watcher = watcher_thread();
+ if (watcher != NULL)
+ watcher->_SleepEvent->unpark();
+
while(watcher_thread() != NULL) {
// This wait should make safepoint checks, wait without a timeout,
// and wait as a suspend-equivalent condition.
@@ -1364,6 +1388,8 @@
this->create_stack_guard_pages();
+ this->cache_global_variables();
+
// Thread is now sufficiently initialized to be handled by the safepoint code as being
// in the VM. Change thread state from _thread_new to _thread_in_vm
ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm);
@@ -2955,6 +2981,9 @@
return status;
}
+ // Should be done after the heap is fully created
+ main_thread->cache_global_variables();
+
HandleMark hm;
{ MutexLocker mu(Threads_lock);
@@ -3230,6 +3259,9 @@
WatcherThread::start();
}
+ // Give os specific code one last chance to start
+ os::init_3();
+
create_vm_timer.end();
return JNI_OK;
}
@@ -3249,12 +3281,18 @@
char buffer[JVM_MAXPATHLEN];
char ebuf[1024];
const char *name = agent->name();
+ const char *msg = "Could not find agent library ";
if (agent->is_absolute_path()) {
library = hpi::dll_load(name, ebuf, sizeof ebuf);
if (library == NULL) {
+ const char *sub_msg = " in absolute path, with error: ";
+ size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
+ char *buf = NEW_C_HEAP_ARRAY(char, len);
+ jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
// If we can't find the agent, exit.
- vm_exit_during_initialization("Could not find agent library in absolute path", name);
+ vm_exit_during_initialization(buf, NULL);
+ FREE_C_HEAP_ARRAY(char, buf);
}
} else {
// Try to load the agent from the standard dll directory
@@ -3267,17 +3305,17 @@
char *home = Arguments::get_java_home();
const char *fmt = "%s/bin/java %s -Dkernel.background.download=false"
" sun.jkernel.DownloadManager -download client_jvm";
- int length = strlen(props) + strlen(home) + strlen(fmt) + 1;
- char *cmd = AllocateHeap(length);
+ size_t length = strlen(props) + strlen(home) + strlen(fmt) + 1;
+ char *cmd = NEW_C_HEAP_ARRAY(char, length);
jio_snprintf(cmd, length, fmt, home, props);
int status = os::fork_and_exec(cmd);
FreeHeap(props);
- FreeHeap(cmd);
if (status == -1) {
warning(cmd);
vm_exit_during_initialization("fork_and_exec failed: %s",
strerror(errno));
}
+ FREE_C_HEAP_ARRAY(char, cmd);
// when this comes back the instrument.dll should be where it belongs.
library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
}
@@ -3287,8 +3325,13 @@
hpi::dll_build_name(buffer, sizeof(buffer), ns, name);
library = hpi::dll_load(buffer, ebuf, sizeof ebuf);
if (library == NULL) {
+ const char *sub_msg = " on the library path, with error: ";
+ size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
+ char *buf = NEW_C_HEAP_ARRAY(char, len);
+ jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
// If we can't find the agent, exit.
- vm_exit_during_initialization("Could not find agent library on the library path or in the local directory", name);
+ vm_exit_during_initialization(buf, NULL);
+ FREE_C_HEAP_ARRAY(char, buf);
}
}
}
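
The WatcherThread change above replaces the plain os::sleep with a timed park that tolerates spurious wakeups: each wakeup either observes _should_terminate or subtracts the elapsed time and parks again for the remainder, and stop() now unparks the sleeping thread after setting the flag. A stand-alone sketch of that wait-with-retry pattern using C++11 primitives (the names below are illustrative, not HotSpot APIs):

#include <chrono>
#include <condition_variable>
#include <mutex>

static std::mutex              sleep_lock;
static std::condition_variable sleep_event;              // plays the role of _SleepEvent
static bool                    should_terminate = false; // set under sleep_lock, then notified

static void timed_wait_with_retry(long long time_to_wait_ms) {
  std::unique_lock<std::mutex> ml(sleep_lock);
  auto deadline = std::chrono::steady_clock::now()
                + std::chrono::milliseconds(time_to_wait_ms);
  // Wait until the timeout elapses or termination is requested; a spurious
  // wakeup simply re-enters the wait for the time remaining to the deadline.
  while (!should_terminate &&
         sleep_event.wait_until(ml, deadline) != std::cv_status::timeout) {
    // spurious wakeup: loop and wait again
  }
}
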
--- a/hotspot/src/share/vm/runtime/thread.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -410,9 +410,6 @@
// Sweeper support
void nmethods_do(CodeBlobClosure* cf);
- // Tells if adr belong to this thread. This is used
- // for checking if a lock is owned by the running thread.
-
// Used by fast lock support
virtual bool is_lock_owned(address adr) const;
@@ -449,6 +446,11 @@
void set_stack_size(size_t size) { _stack_size = size; }
void record_stack_base_and_size();
+ bool on_local_stack(address adr) const {
+ /* QQQ this has knowledge of direction, ought to be a stack method */
+ return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
+ }
+
int lgrp_id() const { return _lgrp_id; }
void set_lgrp_id(int value) { _lgrp_id = value; }
@@ -609,7 +611,7 @@
private:
static WatcherThread* _watcher_thread;
- static bool _should_terminate;
+ volatile static bool _should_terminate; // updated without holding lock
public:
enum SomeConstants {
delay_interval = 10 // interrupt delay in milliseconds
@@ -839,6 +841,10 @@
return (struct JNINativeInterface_ *)_jni_environment.functions;
}
+ // This function is called at thread creation to allow
+ // platform specific thread variables to be initialized.
+ void cache_global_variables();
+
// Executes Shutdown.shutdown()
void invoke_shutdown_hooks();
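
The new on_local_stack() helper assumes a downward-growing stack: an address belongs to the thread when it lies between stack_base - stack_size and stack_base, inclusive. A stand-alone version of the same predicate (the numbers in the comment are hypothetical):

#include <cstddef>
#include <cstdint>

// Mirrors Thread::on_local_stack above, assuming the stack grows downward.
static bool on_local_stack(uintptr_t stack_base, size_t stack_size, uintptr_t adr) {
  return stack_base >= adr && adr >= stack_base - stack_size;
}
// e.g. with base 0x7f0000100000 and size 0x100000, address 0x7f00000c0000
// is on the stack, while anything above the base or below base - size is not.
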
--- a/hotspot/src/share/vm/runtime/vm_version.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_version.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -95,7 +95,11 @@
#define VMTYPE "Server"
#else // TIERED
#ifdef ZERO
+#ifdef SHARK
+ #define VMTYPE "Shark"
+#else // SHARK
#define VMTYPE "Zero"
+#endif // SHARK
#else // ZERO
#define VMTYPE COMPILER1_PRESENT("Client") \
COMPILER2_PRESENT("Server")
@@ -152,6 +156,8 @@
#define CPU IA32_ONLY("x86") \
IA64_ONLY("ia64") \
AMD64_ONLY("amd64") \
+ ARM_ONLY("arm") \
+ PPC_ONLY("ppc") \
SPARC_ONLY("sparc")
#endif // ZERO
--- a/hotspot/src/share/vm/runtime/vm_version.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vm_version.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -70,6 +70,9 @@
return _logical_processors_per_package;
}
+ // Arch-specific policy for biased locking
+ static bool use_biased_locking() { return true; }
+
// Number of page sizes efficiently supported by the hardware. Most chips now
// support two sizes, thus this default implementation. Processor-specific
// subclasses should define new versions to hide this one as needed. Note
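
use_biased_locking() above is a default that a CPU port can hide with its own static member when biased locking is not profitable on that architecture. A stand-alone sketch of the static-hiding pattern (class names and the policy choice are illustrative, not from the patch):

struct Abstract_Version {
  static bool use_biased_locking() { return true; }   // default policy, as above
};
struct Port_Version : Abstract_Version {
  static bool use_biased_locking() { return false; }  // port-specific override
};
// Callers that name Port_Version::use_biased_locking() get the port's policy.
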
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/llvmHeaders.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifdef assert
+ #undef assert
+#endif
+
+#ifdef DEBUG
+ #define SHARK_DEBUG
+ #undef DEBUG
+#endif
+
+#include <llvm/Argument.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/ExecutionEngine/ExecutionEngine.h>
+#include <llvm/Instructions.h>
+#include <llvm/LLVMContext.h>
+#include <llvm/Module.h>
+#if SHARK_LLVM_VERSION < 27
+#include <llvm/ModuleProvider.h>
+#endif
+#include <llvm/Support/IRBuilder.h>
+#include <llvm/System/Threading.h>
+#include <llvm/Target/TargetSelect.h>
+#include <llvm/Type.h>
+#include <llvm/ExecutionEngine/JITMemoryManager.h>
+#include <llvm/Support/CommandLine.h>
+#if SHARK_LLVM_VERSION >= 27
+#include <llvm/ExecutionEngine/JIT.h>
+#include <llvm/ADT/StringMap.h>
+#include <llvm/Support/Debug.h>
+#include <llvm/System/Host.h>
+#endif
+
+#include <map>
+
+#ifdef assert
+ #undef assert
+#endif
+
+// from hotspot/src/share/vm/utilities/debug.hpp
+#ifdef ASSERT
+#ifndef USE_REPEATED_ASSERTS
+#define assert(p, msg) \
+do { \
+ if (!(p)) { \
+ report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg); \
+ BREAKPOINT; \
+ } \
+} while (0)
+#else // #ifndef USE_REPEATED_ASSERTS
+#define assert(p, msg) \
+do { \
+ for (int __i = 0; __i < AssertRepeat; __i++) { \
+ if (!(p)) { \
+ report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg); \
+ BREAKPOINT; \
+ } \
+ } \
+} while (0)
+#endif // #ifndef USE_REPEATED_ASSERTS
+#else
+ #define assert(p, msg)
+#endif
+
+#ifdef DEBUG
+ #undef DEBUG
+#endif
+#ifdef SHARK_DEBUG
+ #define DEBUG
+ #undef SHARK_DEBUG
+#endif
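
llvmHeaders.hpp has to hide HotSpot's assert and DEBUG macros before including the LLVM headers and restore them afterwards, using SHARK_DEBUG as a temporary to remember whether DEBUG was defined. A minimal sketch of that save/include/restore dance, with MYPROJ_DEBUG and an ordinary standard header as placeholders:

// Save our DEBUG setting, hide it from the external headers, then restore it.
#ifdef DEBUG
  #define MYPROJ_DEBUG          // remember that DEBUG was defined
  #undef DEBUG
#endif

#include <vector>               // stands in for the external (LLVM) headers

#ifdef DEBUG                    // drop whatever the external headers left behind
  #undef DEBUG
#endif
#ifdef MYPROJ_DEBUG             // restore the original setting
  #define DEBUG
  #undef MYPROJ_DEBUG
#endif
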
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/llvmValue.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class LLVMValue : public AllStatic {
+ public:
+ static llvm::ConstantInt* jbyte_constant(jbyte value)
+ {
+ return llvm::ConstantInt::get(SharkType::jbyte_type(), value, true);
+ }
+ static llvm::ConstantInt* jint_constant(jint value)
+ {
+ return llvm::ConstantInt::get(SharkType::jint_type(), value, true);
+ }
+ static llvm::ConstantInt* jlong_constant(jlong value)
+ {
+ return llvm::ConstantInt::get(SharkType::jlong_type(), value, true);
+ }
+ static llvm::ConstantFP* jfloat_constant(jfloat value)
+ {
+ return llvm::ConstantFP::get(SharkContext::current(), llvm::APFloat(value));
+ }
+ static llvm::ConstantFP* jdouble_constant(jdouble value)
+ {
+ return llvm::ConstantFP::get(SharkContext::current(), llvm::APFloat(value));
+ }
+ static llvm::ConstantPointerNull* null()
+ {
+ return llvm::ConstantPointerNull::get(SharkType::oop_type());
+ }
+
+ public:
+ static llvm::ConstantInt* bit_constant(int value)
+ {
+ return llvm::ConstantInt::get(SharkType::bit_type(), value, false);
+ }
+ static llvm::ConstantInt* intptr_constant(intptr_t value)
+ {
+ return llvm::ConstantInt::get(SharkType::intptr_type(), value, false);
+ }
+};
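
LLVMValue wraps llvm::ConstantInt, llvm::ConstantFP and llvm::ConstantPointerNull construction so the rest of Shark can create correctly typed Java constants in one call. A usage fragment (it assumes the LLVMValue, SharkType and SharkContext declarations above are in scope) showing the helpers as the bytecode handlers in sharkBlock.cpp use them:

static llvm::Value* example_constants() {
  llvm::ConstantInt*         mask   = LLVMValue::jint_constant(0x1f);  // shift-count mask
  llvm::ConstantInt*         min_jl = LLVMValue::jlong_constant(0x8000000000000000LL);
  llvm::ConstantPointerNull* no_oop = LLVMValue::null();               // aconst_null
  (void) min_jl; (void) no_oop;
  return mask;
}
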
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkBlock.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,1260 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkBlock.cpp.incl"
+
+using namespace llvm;
+
+void SharkBlock::parse_bytecode(int start, int limit) {
+ SharkValue *a, *b, *c, *d;
+ int i;
+
+ // Ensure the current state is initialized before we emit any code,
+ // so that any setup code for the state is at the start of the block
+ current_state();
+
+ // Parse the bytecodes
+ iter()->reset_to_bci(start);
+ while (iter()->next_bci() < limit) {
+ NOT_PRODUCT(a = b = c = d = NULL);
+ iter()->next();
+
+ if (SharkTraceBytecodes)
+ tty->print_cr("%4d: %s", bci(), Bytecodes::name(bc()));
+
+ if (has_trap() && trap_bci() == bci()) {
+ do_trap(trap_request());
+ return;
+ }
+
+ if (UseLoopSafepoints) {
+ // XXX if a lcmp is followed by an if_?? then C2 maybe-inserts
+ // the safepoint before the lcmp rather than before the if.
+ // Maybe we should do this too. See parse2.cpp for details.
+ switch (bc()) {
+ case Bytecodes::_goto:
+ case Bytecodes::_ifnull:
+ case Bytecodes::_ifnonnull:
+ case Bytecodes::_if_acmpeq:
+ case Bytecodes::_if_acmpne:
+ case Bytecodes::_ifeq:
+ case Bytecodes::_ifne:
+ case Bytecodes::_iflt:
+ case Bytecodes::_ifle:
+ case Bytecodes::_ifgt:
+ case Bytecodes::_ifge:
+ case Bytecodes::_if_icmpeq:
+ case Bytecodes::_if_icmpne:
+ case Bytecodes::_if_icmplt:
+ case Bytecodes::_if_icmple:
+ case Bytecodes::_if_icmpgt:
+ case Bytecodes::_if_icmpge:
+ if (iter()->get_dest() <= bci())
+ maybe_add_backedge_safepoint();
+ break;
+
+ case Bytecodes::_goto_w:
+ if (iter()->get_far_dest() <= bci())
+ maybe_add_backedge_safepoint();
+ break;
+
+ case Bytecodes::_tableswitch:
+ case Bytecodes::_lookupswitch:
+ if (switch_default_dest() <= bci()) {
+ maybe_add_backedge_safepoint();
+ break;
+ }
+ int len = switch_table_length();
+ for (int i = 0; i < len; i++) {
+ if (switch_dest(i) <= bci()) {
+ maybe_add_backedge_safepoint();
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ switch (bc()) {
+ case Bytecodes::_nop:
+ break;
+
+ case Bytecodes::_aconst_null:
+ push(SharkValue::null());
+ break;
+
+ case Bytecodes::_iconst_m1:
+ push(SharkValue::jint_constant(-1));
+ break;
+ case Bytecodes::_iconst_0:
+ push(SharkValue::jint_constant(0));
+ break;
+ case Bytecodes::_iconst_1:
+ push(SharkValue::jint_constant(1));
+ break;
+ case Bytecodes::_iconst_2:
+ push(SharkValue::jint_constant(2));
+ break;
+ case Bytecodes::_iconst_3:
+ push(SharkValue::jint_constant(3));
+ break;
+ case Bytecodes::_iconst_4:
+ push(SharkValue::jint_constant(4));
+ break;
+ case Bytecodes::_iconst_5:
+ push(SharkValue::jint_constant(5));
+ break;
+
+ case Bytecodes::_lconst_0:
+ push(SharkValue::jlong_constant(0));
+ break;
+ case Bytecodes::_lconst_1:
+ push(SharkValue::jlong_constant(1));
+ break;
+
+ case Bytecodes::_fconst_0:
+ push(SharkValue::jfloat_constant(0));
+ break;
+ case Bytecodes::_fconst_1:
+ push(SharkValue::jfloat_constant(1));
+ break;
+ case Bytecodes::_fconst_2:
+ push(SharkValue::jfloat_constant(2));
+ break;
+
+ case Bytecodes::_dconst_0:
+ push(SharkValue::jdouble_constant(0));
+ break;
+ case Bytecodes::_dconst_1:
+ push(SharkValue::jdouble_constant(1));
+ break;
+
+ case Bytecodes::_bipush:
+ push(SharkValue::jint_constant(iter()->get_constant_u1()));
+ break;
+ case Bytecodes::_sipush:
+ push(SharkValue::jint_constant(iter()->get_constant_u2()));
+ break;
+
+ case Bytecodes::_ldc:
+ case Bytecodes::_ldc_w:
+ case Bytecodes::_ldc2_w:
+ push(SharkConstant::for_ldc(iter())->value(builder()));
+ break;
+
+ case Bytecodes::_iload_0:
+ case Bytecodes::_lload_0:
+ case Bytecodes::_fload_0:
+ case Bytecodes::_dload_0:
+ case Bytecodes::_aload_0:
+ push(local(0));
+ break;
+ case Bytecodes::_iload_1:
+ case Bytecodes::_lload_1:
+ case Bytecodes::_fload_1:
+ case Bytecodes::_dload_1:
+ case Bytecodes::_aload_1:
+ push(local(1));
+ break;
+ case Bytecodes::_iload_2:
+ case Bytecodes::_lload_2:
+ case Bytecodes::_fload_2:
+ case Bytecodes::_dload_2:
+ case Bytecodes::_aload_2:
+ push(local(2));
+ break;
+ case Bytecodes::_iload_3:
+ case Bytecodes::_lload_3:
+ case Bytecodes::_fload_3:
+ case Bytecodes::_dload_3:
+ case Bytecodes::_aload_3:
+ push(local(3));
+ break;
+ case Bytecodes::_iload:
+ case Bytecodes::_lload:
+ case Bytecodes::_fload:
+ case Bytecodes::_dload:
+ case Bytecodes::_aload:
+ push(local(iter()->get_index()));
+ break;
+
+ case Bytecodes::_baload:
+ do_aload(T_BYTE);
+ break;
+ case Bytecodes::_caload:
+ do_aload(T_CHAR);
+ break;
+ case Bytecodes::_saload:
+ do_aload(T_SHORT);
+ break;
+ case Bytecodes::_iaload:
+ do_aload(T_INT);
+ break;
+ case Bytecodes::_laload:
+ do_aload(T_LONG);
+ break;
+ case Bytecodes::_faload:
+ do_aload(T_FLOAT);
+ break;
+ case Bytecodes::_daload:
+ do_aload(T_DOUBLE);
+ break;
+ case Bytecodes::_aaload:
+ do_aload(T_OBJECT);
+ break;
+
+ case Bytecodes::_istore_0:
+ case Bytecodes::_lstore_0:
+ case Bytecodes::_fstore_0:
+ case Bytecodes::_dstore_0:
+ case Bytecodes::_astore_0:
+ set_local(0, pop());
+ break;
+ case Bytecodes::_istore_1:
+ case Bytecodes::_lstore_1:
+ case Bytecodes::_fstore_1:
+ case Bytecodes::_dstore_1:
+ case Bytecodes::_astore_1:
+ set_local(1, pop());
+ break;
+ case Bytecodes::_istore_2:
+ case Bytecodes::_lstore_2:
+ case Bytecodes::_fstore_2:
+ case Bytecodes::_dstore_2:
+ case Bytecodes::_astore_2:
+ set_local(2, pop());
+ break;
+ case Bytecodes::_istore_3:
+ case Bytecodes::_lstore_3:
+ case Bytecodes::_fstore_3:
+ case Bytecodes::_dstore_3:
+ case Bytecodes::_astore_3:
+ set_local(3, pop());
+ break;
+ case Bytecodes::_istore:
+ case Bytecodes::_lstore:
+ case Bytecodes::_fstore:
+ case Bytecodes::_dstore:
+ case Bytecodes::_astore:
+ set_local(iter()->get_index(), pop());
+ break;
+
+ case Bytecodes::_bastore:
+ do_astore(T_BYTE);
+ break;
+ case Bytecodes::_castore:
+ do_astore(T_CHAR);
+ break;
+ case Bytecodes::_sastore:
+ do_astore(T_SHORT);
+ break;
+ case Bytecodes::_iastore:
+ do_astore(T_INT);
+ break;
+ case Bytecodes::_lastore:
+ do_astore(T_LONG);
+ break;
+ case Bytecodes::_fastore:
+ do_astore(T_FLOAT);
+ break;
+ case Bytecodes::_dastore:
+ do_astore(T_DOUBLE);
+ break;
+ case Bytecodes::_aastore:
+ do_astore(T_OBJECT);
+ break;
+
+ case Bytecodes::_pop:
+ xpop();
+ break;
+ case Bytecodes::_pop2:
+ xpop();
+ xpop();
+ break;
+ case Bytecodes::_swap:
+ a = xpop();
+ b = xpop();
+ xpush(a);
+ xpush(b);
+ break;
+ case Bytecodes::_dup:
+ a = xpop();
+ xpush(a);
+ xpush(a);
+ break;
+ case Bytecodes::_dup_x1:
+ a = xpop();
+ b = xpop();
+ xpush(a);
+ xpush(b);
+ xpush(a);
+ break;
+ case Bytecodes::_dup_x2:
+ a = xpop();
+ b = xpop();
+ c = xpop();
+ xpush(a);
+ xpush(c);
+ xpush(b);
+ xpush(a);
+ break;
+ case Bytecodes::_dup2:
+ a = xpop();
+ b = xpop();
+ xpush(b);
+ xpush(a);
+ xpush(b);
+ xpush(a);
+ break;
+ case Bytecodes::_dup2_x1:
+ a = xpop();
+ b = xpop();
+ c = xpop();
+ xpush(b);
+ xpush(a);
+ xpush(c);
+ xpush(b);
+ xpush(a);
+ break;
+ case Bytecodes::_dup2_x2:
+ a = xpop();
+ b = xpop();
+ c = xpop();
+ d = xpop();
+ xpush(b);
+ xpush(a);
+ xpush(d);
+ xpush(c);
+ xpush(b);
+ xpush(a);
+ break;
+
+ case Bytecodes::_arraylength:
+ do_arraylength();
+ break;
+
+ case Bytecodes::_getfield:
+ do_getfield();
+ break;
+ case Bytecodes::_getstatic:
+ do_getstatic();
+ break;
+ case Bytecodes::_putfield:
+ do_putfield();
+ break;
+ case Bytecodes::_putstatic:
+ do_putstatic();
+ break;
+
+ case Bytecodes::_iadd:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateAdd(a->jint_value(), b->jint_value()), false));
+ break;
+ case Bytecodes::_isub:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateSub(a->jint_value(), b->jint_value()), false));
+ break;
+ case Bytecodes::_imul:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateMul(a->jint_value(), b->jint_value()), false));
+ break;
+ case Bytecodes::_idiv:
+ do_idiv();
+ break;
+ case Bytecodes::_irem:
+ do_irem();
+ break;
+ case Bytecodes::_ineg:
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateNeg(a->jint_value()), a->zero_checked()));
+ break;
+ case Bytecodes::_ishl:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateShl(
+ a->jint_value(),
+ builder()->CreateAnd(
+ b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
+ break;
+ case Bytecodes::_ishr:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateAShr(
+ a->jint_value(),
+ builder()->CreateAnd(
+ b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
+ break;
+ case Bytecodes::_iushr:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateLShr(
+ a->jint_value(),
+ builder()->CreateAnd(
+ b->jint_value(), LLVMValue::jint_constant(0x1f))), false));
+ break;
+ case Bytecodes::_iand:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateAnd(a->jint_value(), b->jint_value()), false));
+ break;
+ case Bytecodes::_ior:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateOr(a->jint_value(), b->jint_value()),
+ a->zero_checked() && b->zero_checked()));
+ break;
+ case Bytecodes::_ixor:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jint(
+ builder()->CreateXor(a->jint_value(), b->jint_value()), false));
+ break;
+
+ case Bytecodes::_ladd:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateAdd(a->jlong_value(), b->jlong_value()), false));
+ break;
+ case Bytecodes::_lsub:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateSub(a->jlong_value(), b->jlong_value()), false));
+ break;
+ case Bytecodes::_lmul:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateMul(a->jlong_value(), b->jlong_value()), false));
+ break;
+ case Bytecodes::_ldiv:
+ do_ldiv();
+ break;
+ case Bytecodes::_lrem:
+ do_lrem();
+ break;
+ case Bytecodes::_lneg:
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateNeg(a->jlong_value()), a->zero_checked()));
+ break;
+ case Bytecodes::_lshl:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateShl(
+ a->jlong_value(),
+ builder()->CreateIntCast(
+ builder()->CreateAnd(
+ b->jint_value(), LLVMValue::jint_constant(0x3f)),
+ SharkType::jlong_type(), true)), false));
+ break;
+ case Bytecodes::_lshr:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateAShr(
+ a->jlong_value(),
+ builder()->CreateIntCast(
+ builder()->CreateAnd(
+ b->jint_value(), LLVMValue::jint_constant(0x3f)),
+ SharkType::jlong_type(), true)), false));
+ break;
+ case Bytecodes::_lushr:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateLShr(
+ a->jlong_value(),
+ builder()->CreateIntCast(
+ builder()->CreateAnd(
+ b->jint_value(), LLVMValue::jint_constant(0x3f)),
+ SharkType::jlong_type(), true)), false));
+ break;
+ case Bytecodes::_land:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateAnd(a->jlong_value(), b->jlong_value()), false));
+ break;
+ case Bytecodes::_lor:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateOr(a->jlong_value(), b->jlong_value()),
+ a->zero_checked() && b->zero_checked()));
+ break;
+ case Bytecodes::_lxor:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateXor(a->jlong_value(), b->jlong_value()), false));
+ break;
+
+ case Bytecodes::_fadd:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jfloat(
+ builder()->CreateFAdd(a->jfloat_value(), b->jfloat_value())));
+ break;
+ case Bytecodes::_fsub:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jfloat(
+ builder()->CreateFSub(a->jfloat_value(), b->jfloat_value())));
+ break;
+ case Bytecodes::_fmul:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jfloat(
+ builder()->CreateFMul(a->jfloat_value(), b->jfloat_value())));
+ break;
+ case Bytecodes::_fdiv:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jfloat(
+ builder()->CreateFDiv(a->jfloat_value(), b->jfloat_value())));
+ break;
+ case Bytecodes::_frem:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jfloat(
+ builder()->CreateFRem(a->jfloat_value(), b->jfloat_value())));
+ break;
+ case Bytecodes::_fneg:
+ a = pop();
+ push(SharkValue::create_jfloat(
+ builder()->CreateFNeg(a->jfloat_value())));
+ break;
+
+ case Bytecodes::_dadd:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jdouble(
+ builder()->CreateFAdd(a->jdouble_value(), b->jdouble_value())));
+ break;
+ case Bytecodes::_dsub:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jdouble(
+ builder()->CreateFSub(a->jdouble_value(), b->jdouble_value())));
+ break;
+ case Bytecodes::_dmul:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jdouble(
+ builder()->CreateFMul(a->jdouble_value(), b->jdouble_value())));
+ break;
+ case Bytecodes::_ddiv:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jdouble(
+ builder()->CreateFDiv(a->jdouble_value(), b->jdouble_value())));
+ break;
+ case Bytecodes::_drem:
+ b = pop();
+ a = pop();
+ push(SharkValue::create_jdouble(
+ builder()->CreateFRem(a->jdouble_value(), b->jdouble_value())));
+ break;
+ case Bytecodes::_dneg:
+ a = pop();
+ push(SharkValue::create_jdouble(
+ builder()->CreateFNeg(a->jdouble_value())));
+ break;
+
+ case Bytecodes::_iinc:
+ i = iter()->get_index();
+ set_local(
+ i,
+ SharkValue::create_jint(
+ builder()->CreateAdd(
+ LLVMValue::jint_constant(iter()->get_iinc_con()),
+ local(i)->jint_value()), false));
+ break;
+
+ case Bytecodes::_lcmp:
+ do_lcmp();
+ break;
+
+ case Bytecodes::_fcmpl:
+ do_fcmp(false, false);
+ break;
+ case Bytecodes::_fcmpg:
+ do_fcmp(false, true);
+ break;
+ case Bytecodes::_dcmpl:
+ do_fcmp(true, false);
+ break;
+ case Bytecodes::_dcmpg:
+ do_fcmp(true, true);
+ break;
+
+ case Bytecodes::_i2l:
+ a = pop();
+ push(SharkValue::create_jlong(
+ builder()->CreateIntCast(
+ a->jint_value(), SharkType::jlong_type(), true), a->zero_checked()));
+ break;
+ case Bytecodes::_i2f:
+ push(SharkValue::create_jfloat(
+ builder()->CreateSIToFP(
+ pop()->jint_value(), SharkType::jfloat_type())));
+ break;
+ case Bytecodes::_i2d:
+ push(SharkValue::create_jdouble(
+ builder()->CreateSIToFP(
+ pop()->jint_value(), SharkType::jdouble_type())));
+ break;
+
+ case Bytecodes::_l2i:
+ push(SharkValue::create_jint(
+ builder()->CreateIntCast(
+ pop()->jlong_value(), SharkType::jint_type(), true), false));
+ break;
+ case Bytecodes::_l2f:
+ push(SharkValue::create_jfloat(
+ builder()->CreateSIToFP(
+ pop()->jlong_value(), SharkType::jfloat_type())));
+ break;
+ case Bytecodes::_l2d:
+ push(SharkValue::create_jdouble(
+ builder()->CreateSIToFP(
+ pop()->jlong_value(), SharkType::jdouble_type())));
+ break;
+
+ case Bytecodes::_f2i:
+ push(SharkValue::create_jint(
+ builder()->CreateCall(
+ builder()->f2i(), pop()->jfloat_value()), false));
+ break;
+ case Bytecodes::_f2l:
+ push(SharkValue::create_jlong(
+ builder()->CreateCall(
+ builder()->f2l(), pop()->jfloat_value()), false));
+ break;
+ case Bytecodes::_f2d:
+ push(SharkValue::create_jdouble(
+ builder()->CreateFPExt(
+ pop()->jfloat_value(), SharkType::jdouble_type())));
+ break;
+
+ case Bytecodes::_d2i:
+ push(SharkValue::create_jint(
+ builder()->CreateCall(
+ builder()->d2i(), pop()->jdouble_value()), false));
+ break;
+ case Bytecodes::_d2l:
+ push(SharkValue::create_jlong(
+ builder()->CreateCall(
+ builder()->d2l(), pop()->jdouble_value()), false));
+ break;
+ case Bytecodes::_d2f:
+ push(SharkValue::create_jfloat(
+ builder()->CreateFPTrunc(
+ pop()->jdouble_value(), SharkType::jfloat_type())));
+ break;
+
+ case Bytecodes::_i2b:
+ push(SharkValue::create_jint(
+ builder()->CreateAShr(
+ builder()->CreateShl(
+ pop()->jint_value(),
+ LLVMValue::jint_constant(24)),
+ LLVMValue::jint_constant(24)), false));
+ break;
+ case Bytecodes::_i2c:
+ push(SharkValue::create_jint(
+ builder()->CreateAnd(
+ pop()->jint_value(),
+ LLVMValue::jint_constant(0xffff)), false));
+ break;
+ case Bytecodes::_i2s:
+ push(SharkValue::create_jint(
+ builder()->CreateAShr(
+ builder()->CreateShl(
+ pop()->jint_value(),
+ LLVMValue::jint_constant(16)),
+ LLVMValue::jint_constant(16)), false));
+ break;
+
+ case Bytecodes::_return:
+ do_return(T_VOID);
+ break;
+ case Bytecodes::_ireturn:
+ do_return(T_INT);
+ break;
+ case Bytecodes::_lreturn:
+ do_return(T_LONG);
+ break;
+ case Bytecodes::_freturn:
+ do_return(T_FLOAT);
+ break;
+ case Bytecodes::_dreturn:
+ do_return(T_DOUBLE);
+ break;
+ case Bytecodes::_areturn:
+ do_return(T_OBJECT);
+ break;
+
+ case Bytecodes::_athrow:
+ do_athrow();
+ break;
+
+ case Bytecodes::_goto:
+ case Bytecodes::_goto_w:
+ do_goto();
+ break;
+
+ case Bytecodes::_jsr:
+ case Bytecodes::_jsr_w:
+ do_jsr();
+ break;
+
+ case Bytecodes::_ret:
+ do_ret();
+ break;
+
+ case Bytecodes::_ifnull:
+ do_if(ICmpInst::ICMP_EQ, SharkValue::null(), pop());
+ break;
+ case Bytecodes::_ifnonnull:
+ do_if(ICmpInst::ICMP_NE, SharkValue::null(), pop());
+ break;
+ case Bytecodes::_if_acmpeq:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_EQ, b, a);
+ break;
+ case Bytecodes::_if_acmpne:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_NE, b, a);
+ break;
+ case Bytecodes::_ifeq:
+ do_if(ICmpInst::ICMP_EQ, SharkValue::jint_constant(0), pop());
+ break;
+ case Bytecodes::_ifne:
+ do_if(ICmpInst::ICMP_NE, SharkValue::jint_constant(0), pop());
+ break;
+ case Bytecodes::_iflt:
+ do_if(ICmpInst::ICMP_SLT, SharkValue::jint_constant(0), pop());
+ break;
+ case Bytecodes::_ifle:
+ do_if(ICmpInst::ICMP_SLE, SharkValue::jint_constant(0), pop());
+ break;
+ case Bytecodes::_ifgt:
+ do_if(ICmpInst::ICMP_SGT, SharkValue::jint_constant(0), pop());
+ break;
+ case Bytecodes::_ifge:
+ do_if(ICmpInst::ICMP_SGE, SharkValue::jint_constant(0), pop());
+ break;
+ case Bytecodes::_if_icmpeq:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_EQ, b, a);
+ break;
+ case Bytecodes::_if_icmpne:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_NE, b, a);
+ break;
+ case Bytecodes::_if_icmplt:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_SLT, b, a);
+ break;
+ case Bytecodes::_if_icmple:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_SLE, b, a);
+ break;
+ case Bytecodes::_if_icmpgt:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_SGT, b, a);
+ break;
+ case Bytecodes::_if_icmpge:
+ b = pop();
+ a = pop();
+ do_if(ICmpInst::ICMP_SGE, b, a);
+ break;
+
+ case Bytecodes::_tableswitch:
+ case Bytecodes::_lookupswitch:
+ do_switch();
+ break;
+
+ case Bytecodes::_invokestatic:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokevirtual:
+ case Bytecodes::_invokeinterface:
+ do_call();
+ break;
+
+ case Bytecodes::_instanceof:
+ // This is a very common construct:
+ //
+ // if (object instanceof Klass) {
+ // something = (Klass) object;
+ // ...
+ // }
+ //
+ // which gets compiled to something like this:
+ //
+ // 28: aload 9
+ // 30: instanceof <Class Klass>
+ // 33: ifeq 52
+ // 36: aload 9
+ // 38: checkcast <Class Klass>
+ //
+ // Handling both bytecodes at once allows us
+ // to eliminate the checkcast.
+ if (iter()->next_bci() < limit &&
+ (iter()->next_bc() == Bytecodes::_ifeq ||
+ iter()->next_bc() == Bytecodes::_ifne) &&
+ (!UseLoopSafepoints ||
+ iter()->next_get_dest() > iter()->next_bci())) {
+ if (maybe_do_instanceof_if()) {
+ iter()->next();
+ if (SharkTraceBytecodes)
+ tty->print_cr("%4d: %s", bci(), Bytecodes::name(bc()));
+ break;
+ }
+ }
+ // fall through
+ case Bytecodes::_checkcast:
+ do_instance_check();
+ break;
+
+ case Bytecodes::_new:
+ do_new();
+ break;
+ case Bytecodes::_newarray:
+ do_newarray();
+ break;
+ case Bytecodes::_anewarray:
+ do_anewarray();
+ break;
+ case Bytecodes::_multianewarray:
+ do_multianewarray();
+ break;
+
+ case Bytecodes::_monitorenter:
+ do_monitorenter();
+ break;
+ case Bytecodes::_monitorexit:
+ do_monitorexit();
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ }
+}
+
+SharkState* SharkBlock::initial_current_state() {
+ return entry_state()->copy();
+}
+
+int SharkBlock::switch_default_dest() {
+ return iter()->get_dest_table(0);
+}
+
+int SharkBlock::switch_table_length() {
+ switch(bc()) {
+ case Bytecodes::_tableswitch:
+ return iter()->get_int_table(2) - iter()->get_int_table(1) + 1;
+
+ case Bytecodes::_lookupswitch:
+ return iter()->get_int_table(1);
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+int SharkBlock::switch_key(int i) {
+ switch(bc()) {
+ case Bytecodes::_tableswitch:
+ return iter()->get_int_table(1) + i;
+
+ case Bytecodes::_lookupswitch:
+ return iter()->get_int_table(2 + 2 * i);
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+int SharkBlock::switch_dest(int i) {
+ switch(bc()) {
+ case Bytecodes::_tableswitch:
+ return iter()->get_dest_table(i + 3);
+
+ case Bytecodes::_lookupswitch:
+ return iter()->get_dest_table(2 + 2 * i + 1);
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+void SharkBlock::do_div_or_rem(bool is_long, bool is_rem) {
+ SharkValue *sb = pop();
+ SharkValue *sa = pop();
+
+ check_divide_by_zero(sb);
+
+ Value *a, *b, *p, *q;
+ if (is_long) {
+ a = sa->jlong_value();
+ b = sb->jlong_value();
+ p = LLVMValue::jlong_constant(0x8000000000000000LL);
+ q = LLVMValue::jlong_constant(-1);
+ }
+ else {
+ a = sa->jint_value();
+ b = sb->jint_value();
+ p = LLVMValue::jint_constant(0x80000000);
+ q = LLVMValue::jint_constant(-1);
+ }
+
+ BasicBlock *ip = builder()->GetBlockInsertionPoint();
+ BasicBlock *special_case = builder()->CreateBlock(ip, "special_case");
+ BasicBlock *general_case = builder()->CreateBlock(ip, "general_case");
+ BasicBlock *done = builder()->CreateBlock(ip, "done");
+
+ builder()->CreateCondBr(
+ builder()->CreateAnd(
+ builder()->CreateICmpEQ(a, p),
+ builder()->CreateICmpEQ(b, q)),
+ special_case, general_case);
+
+ builder()->SetInsertPoint(special_case);
+ Value *special_result;
+ if (is_rem) {
+ if (is_long)
+ special_result = LLVMValue::jlong_constant(0);
+ else
+ special_result = LLVMValue::jint_constant(0);
+ }
+ else {
+ special_result = a;
+ }
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(general_case);
+ Value *general_result;
+ if (is_rem)
+ general_result = builder()->CreateSRem(a, b);
+ else
+ general_result = builder()->CreateSDiv(a, b);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(done);
+ PHINode *result;
+ if (is_long)
+ result = builder()->CreatePHI(SharkType::jlong_type(), "result");
+ else
+ result = builder()->CreatePHI(SharkType::jint_type(), "result");
+ result->addIncoming(special_result, special_case);
+ result->addIncoming(general_result, general_case);
+
+ if (is_long)
+ push(SharkValue::create_jlong(result, false));
+ else
+ push(SharkValue::create_jint(result, false));
+}
+
+void SharkBlock::do_field_access(bool is_get, bool is_field) {
+ bool will_link;
+ ciField *field = iter()->get_field(will_link);
+ assert(will_link, "typeflow responsibility");
+ assert(is_field != field->is_static(), "mismatch");
+
+ // Pop the value off the stack where necessary
+ SharkValue *value = NULL;
+ if (!is_get)
+ value = pop();
+
+ // Find the object we're accessing, if necessary
+ Value *object = NULL;
+ if (is_field) {
+ SharkValue *value = pop();
+ check_null(value);
+ object = value->generic_value();
+ }
+ if (is_get && field->is_constant()) {
+ SharkConstant *constant = SharkConstant::for_field(iter());
+ if (constant->is_loaded())
+ value = constant->value(builder());
+ }
+ if (!is_get || value == NULL) {
+ if (!is_field)
+ object = builder()->CreateInlineOop(field->holder());
+
+ BasicType basic_type = field->type()->basic_type();
+ const Type *stack_type = SharkType::to_stackType(basic_type);
+ const Type *field_type = SharkType::to_arrayType(basic_type);
+
+ Value *addr = builder()->CreateAddressOfStructEntry(
+ object, in_ByteSize(field->offset_in_bytes()),
+ PointerType::getUnqual(field_type),
+ "addr");
+
+ // Do the access
+ if (is_get) {
+ Value *field_value = builder()->CreateLoad(addr);
+
+ if (field_type != stack_type) {
+ field_value = builder()->CreateIntCast(
+ field_value, stack_type, basic_type != T_CHAR);
+ }
+
+ value = SharkValue::create_generic(field->type(), field_value, false);
+ }
+ else {
+ Value *field_value = value->generic_value();
+
+ if (field_type != stack_type) {
+ field_value = builder()->CreateIntCast(
+ field_value, field_type, basic_type != T_CHAR);
+ }
+
+ builder()->CreateStore(field_value, addr);
+
+ if (!field->type()->is_primitive_type())
+ builder()->CreateUpdateBarrierSet(oopDesc::bs(), addr);
+
+ if (field->is_volatile())
+ builder()->CreateMemoryBarrier(SharkBuilder::BARRIER_STORELOAD);
+ }
+ }
+
+ // Push the value onto the stack where necessary
+ if (is_get)
+ push(value);
+}
+
+void SharkBlock::do_lcmp() {
+ Value *b = pop()->jlong_value();
+ Value *a = pop()->jlong_value();
+
+ BasicBlock *ip = builder()->GetBlockInsertionPoint();
+ BasicBlock *ne = builder()->CreateBlock(ip, "lcmp_ne");
+ BasicBlock *lt = builder()->CreateBlock(ip, "lcmp_lt");
+ BasicBlock *gt = builder()->CreateBlock(ip, "lcmp_gt");
+ BasicBlock *done = builder()->CreateBlock(ip, "done");
+
+ BasicBlock *eq = builder()->GetInsertBlock();
+ builder()->CreateCondBr(builder()->CreateICmpEQ(a, b), done, ne);
+
+ builder()->SetInsertPoint(ne);
+ builder()->CreateCondBr(builder()->CreateICmpSLT(a, b), lt, gt);
+
+ builder()->SetInsertPoint(lt);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(gt);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(done);
+ PHINode *result = builder()->CreatePHI(SharkType::jint_type(), "result");
+ result->addIncoming(LLVMValue::jint_constant(-1), lt);
+ result->addIncoming(LLVMValue::jint_constant(0), eq);
+ result->addIncoming(LLVMValue::jint_constant(1), gt);
+
+ push(SharkValue::create_jint(result, false));
+}
+
+void SharkBlock::do_fcmp(bool is_double, bool unordered_is_greater) {
+ Value *a, *b;
+ if (is_double) {
+ b = pop()->jdouble_value();
+ a = pop()->jdouble_value();
+ }
+ else {
+ b = pop()->jfloat_value();
+ a = pop()->jfloat_value();
+ }
+
+ BasicBlock *ip = builder()->GetBlockInsertionPoint();
+ BasicBlock *ordered = builder()->CreateBlock(ip, "ordered");
+ BasicBlock *ge = builder()->CreateBlock(ip, "fcmp_ge");
+ BasicBlock *lt = builder()->CreateBlock(ip, "fcmp_lt");
+ BasicBlock *eq = builder()->CreateBlock(ip, "fcmp_eq");
+ BasicBlock *gt = builder()->CreateBlock(ip, "fcmp_gt");
+ BasicBlock *done = builder()->CreateBlock(ip, "done");
+
+ builder()->CreateCondBr(
+ builder()->CreateFCmpUNO(a, b),
+ unordered_is_greater ? gt : lt, ordered);
+
+ builder()->SetInsertPoint(ordered);
+ builder()->CreateCondBr(builder()->CreateFCmpULT(a, b), lt, ge);
+
+ builder()->SetInsertPoint(ge);
+ builder()->CreateCondBr(builder()->CreateFCmpUGT(a, b), gt, eq);
+
+ builder()->SetInsertPoint(lt);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(gt);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(eq);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(done);
+ PHINode *result = builder()->CreatePHI(SharkType::jint_type(), "result");
+ result->addIncoming(LLVMValue::jint_constant(-1), lt);
+ result->addIncoming(LLVMValue::jint_constant(0), eq);
+ result->addIncoming(LLVMValue::jint_constant(1), gt);
+
+ push(SharkValue::create_jint(result, false));
+}
+
+void SharkBlock::emit_IR() {
+ ShouldNotCallThis();
+}
+
+SharkState* SharkBlock::entry_state() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_zero_check(SharkValue* value) {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::maybe_add_backedge_safepoint() {
+ ShouldNotCallThis();
+}
+
+bool SharkBlock::has_trap() {
+ return false;
+}
+
+int SharkBlock::trap_request() {
+ ShouldNotCallThis();
+}
+
+int SharkBlock::trap_bci() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_trap(int trap_request) {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_arraylength() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_aload(BasicType basic_type) {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_astore(BasicType basic_type) {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_return(BasicType type) {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_athrow() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_goto() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_jsr() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_ret() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_if(ICmpInst::Predicate p, SharkValue* b, SharkValue* a) {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_switch() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_call() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_instance_check() {
+ ShouldNotCallThis();
+}
+
+bool SharkBlock::maybe_do_instanceof_if() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_new() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_newarray() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_anewarray() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_multianewarray() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_monitorenter() {
+ ShouldNotCallThis();
+}
+
+void SharkBlock::do_monitorexit() {
+ ShouldNotCallThis();
+}
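
do_div_or_rem() above splits off the one input pair where a plain signed divide would misbehave: in Java, MIN_VALUE / -1 must yield MIN_VALUE and MIN_VALUE % -1 must yield 0, whereas LLVM's sdiv/srem (like C++'s operators) overflow on that pair. A stand-alone sketch of the same rule for the 32-bit case (divide-by-zero is excluded here, as the patch guards it separately with check_divide_by_zero):

#include <cstdint>

static int32_t java_idiv(int32_t a, int32_t b) {
  if (a == INT32_MIN && b == -1) return a;   // special_case: result wraps to MIN_VALUE
  return a / b;                              // general_case
}
static int32_t java_irem(int32_t a, int32_t b) {
  if (a == INT32_MIN && b == -1) return 0;   // special_case: remainder is defined as 0
  return a % b;                              // general_case
}
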
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkBlock.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkState;
+
+class SharkBlock : public SharkTargetInvariants {
+ protected:
+ SharkBlock(const SharkTargetInvariants* parent)
+ : SharkTargetInvariants(parent),
+ _iter(target()),
+ _current_state(NULL) {}
+
+ SharkBlock(const SharkCompileInvariants* parent, ciMethod* target)
+ : SharkTargetInvariants(parent, target),
+ _iter(target),
+ _current_state(NULL) {}
+
+ private:
+ ciBytecodeStream _iter;
+ SharkState* _current_state;
+
+ public:
+ ciBytecodeStream* iter() {
+ return &_iter;
+ }
+ Bytecodes::Code bc() {
+ return iter()->cur_bc();
+ }
+ int bci() {
+ return iter()->cur_bci();
+ }
+
+ // Entry state
+ protected:
+ virtual SharkState* entry_state();
+
+ // Current state
+ private:
+ SharkState* initial_current_state();
+
+ public:
+ SharkState* current_state() {
+ if (_current_state == NULL)
+ set_current_state(initial_current_state());
+ return _current_state;
+ }
+
+ protected:
+ void set_current_state(SharkState* current_state) {
+ _current_state = current_state;
+ }
+
+ // Local variables
+ protected:
+ SharkValue* local(int index) {
+ SharkValue *value = current_state()->local(index);
+ assert(value != NULL, "shouldn't be");
+ assert(value->is_one_word() ||
+ (index + 1 < max_locals() &&
+ current_state()->local(index + 1) == NULL), "should be");
+ return value;
+ }
+ void set_local(int index, SharkValue* value) {
+ assert(value != NULL, "shouldn't be");
+ current_state()->set_local(index, value);
+ if (value->is_two_word())
+ current_state()->set_local(index + 1, NULL);
+ }
+
+ // Expression stack (raw)
+ protected:
+ void xpush(SharkValue* value) {
+ current_state()->push(value);
+ }
+ SharkValue* xpop() {
+ return current_state()->pop();
+ }
+ SharkValue* xstack(int slot) {
+ SharkValue *value = current_state()->stack(slot);
+ assert(value != NULL, "shouldn't be");
+ assert(value->is_one_word() ||
+ (slot > 0 &&
+ current_state()->stack(slot - 1) == NULL), "should be");
+ return value;
+ }
+ int xstack_depth() {
+ return current_state()->stack_depth();
+ }
+
+ // Expression stack (cooked)
+ protected:
+ void push(SharkValue* value) {
+ assert(value != NULL, "shouldn't be");
+ xpush(value);
+ if (value->is_two_word())
+ xpush(NULL);
+ }
+ SharkValue* pop() {
+ int size = current_state()->stack(0) == NULL ? 2 : 1;
+ if (size == 2)
+ xpop();
+ SharkValue *value = xpop();
+ assert(value && value->size() == size, "should be");
+ return value;
+ }
+ SharkValue* pop_result(BasicType type) {
+ SharkValue *result = pop();
+
+#ifdef ASSERT
+ switch (result->basic_type()) {
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_CHAR:
+ case T_SHORT:
+ assert(type == T_INT, "type mismatch");
+ break;
+
+ case T_ARRAY:
+ assert(type == T_OBJECT, "type mismatch");
+ break;
+
+ default:
+ assert(result->basic_type() == type, "type mismatch");
+ }
+#endif // ASSERT
+
+ return result;
+ }
+
+ // Code generation
+ public:
+ virtual void emit_IR();
+
+ protected:
+ void parse_bytecode(int start, int limit);
+
+ // Helpers
+ protected:
+ virtual void do_zero_check(SharkValue* value);
+
+ // Zero checking
+ protected:
+ void check_null(SharkValue* object) {
+ zero_check(object);
+ }
+ void check_divide_by_zero(SharkValue* value) {
+ zero_check(value);
+ }
+ private:
+ void zero_check(SharkValue* value) {
+ if (!value->zero_checked())
+ do_zero_check(value);
+ }
+
+ // Safepoints
+ protected:
+ virtual void maybe_add_backedge_safepoint();
+
+ // Traps
+ protected:
+ virtual bool has_trap();
+ virtual int trap_request();
+ virtual int trap_bci();
+ virtual void do_trap(int trap_request);
+
+ // arraylength
+ protected:
+ virtual void do_arraylength();
+
+ // *aload and *astore
+ protected:
+ virtual void do_aload(BasicType basic_type);
+ virtual void do_astore(BasicType basic_type);
+
+ // *div and *rem
+ private:
+ void do_idiv() {
+ do_div_or_rem(false, false);
+ }
+ void do_irem() {
+ do_div_or_rem(false, true);
+ }
+ void do_ldiv() {
+ do_div_or_rem(true, false);
+ }
+ void do_lrem() {
+ do_div_or_rem(true, true);
+ }
+ void do_div_or_rem(bool is_long, bool is_rem);
+
+ // get* and put*
+ private:
+ void do_getstatic() {
+ do_field_access(true, false);
+ }
+ void do_getfield() {
+ do_field_access(true, true);
+ }
+ void do_putstatic() {
+ do_field_access(false, false);
+ }
+ void do_putfield() {
+ do_field_access(false, true);
+ }
+ void do_field_access(bool is_get, bool is_field);
+
+ // lcmp and [fd]cmp[lg]
+ private:
+ void do_lcmp();
+ void do_fcmp(bool is_double, bool unordered_is_greater);
+
+ // *return and athrow
+ protected:
+ virtual void do_return(BasicType type);
+ virtual void do_athrow();
+
+ // goto*
+ protected:
+ virtual void do_goto();
+
+ // jsr* and ret
+ protected:
+ virtual void do_jsr();
+ virtual void do_ret();
+
+ // if*
+ protected:
+ virtual void do_if(llvm::ICmpInst::Predicate p, SharkValue* b, SharkValue* a);
+
+ // *switch
+ protected:
+ int switch_default_dest();
+ int switch_table_length();
+ int switch_key(int i);
+ int switch_dest(int i);
+
+ virtual void do_switch();
+
+ // invoke*
+ protected:
+ virtual void do_call();
+
+ // checkcast and instanceof
+ protected:
+ virtual void do_instance_check();
+ virtual bool maybe_do_instanceof_if();
+
+ // new and *newarray
+ protected:
+ virtual void do_new();
+ virtual void do_newarray();
+ virtual void do_anewarray();
+ virtual void do_multianewarray();
+
+ // monitorenter and monitorexit
+ protected:
+ virtual void do_monitorenter();
+ virtual void do_monitorexit();
+};
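
The cooked push()/pop() above encode the JVM rule that long and double values occupy two stack slots: push() appends a NULL marker after a two-word value and pop() checks the top slot for that marker to decide how many raw slots to drop. A toy stand-alone model of that convention (ToyValue is illustrative, standing in for SharkValue):

#include <cassert>
#include <vector>

struct ToyValue { int size; };                       // 1 or 2, like SharkValue::size()
static std::vector<ToyValue*> toy_stack;

static void toy_push(ToyValue* v) {
  toy_stack.push_back(v);
  if (v->size == 2) toy_stack.push_back(nullptr);    // marker slot after a two-word value
}
static ToyValue* toy_pop() {
  int size = toy_stack.back() == nullptr ? 2 : 1;    // marker on top means a two-word value
  if (size == 2) toy_stack.pop_back();               // drop the marker first
  ToyValue* v = toy_stack.back();
  toy_stack.pop_back();
  assert(v != nullptr && v->size == size);
  return v;
}
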
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkBuilder.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkBuilder.cpp.incl"
+
+using namespace llvm;
+
+SharkBuilder::SharkBuilder(SharkCodeBuffer* code_buffer)
+ : IRBuilder<>(SharkContext::current()),
+ _code_buffer(code_buffer) {
+}
+
+// Helpers for accessing structures
+Value* SharkBuilder::CreateAddressOfStructEntry(Value* base,
+ ByteSize offset,
+ const Type* type,
+ const char* name) {
+ return CreateBitCast(CreateStructGEP(base, in_bytes(offset)), type, name);
+}
+
+LoadInst* SharkBuilder::CreateValueOfStructEntry(Value* base,
+ ByteSize offset,
+ const Type* type,
+ const char* name) {
+ return CreateLoad(
+ CreateAddressOfStructEntry(
+ base, offset, PointerType::getUnqual(type)),
+ name);
+}
+
+// Helpers for accessing arrays
+
+LoadInst* SharkBuilder::CreateArrayLength(Value* arrayoop) {
+ return CreateValueOfStructEntry(
+ arrayoop, in_ByteSize(arrayOopDesc::length_offset_in_bytes()),
+ SharkType::jint_type(), "length");
+}
+
+Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
+ const Type* element_type,
+ int element_bytes,
+ ByteSize base_offset,
+ Value* index,
+ const char* name) {
+ Value* offset = CreateIntCast(index, SharkType::intptr_type(), false);
+ if (element_bytes != 1)
+ offset = CreateShl(
+ offset,
+ LLVMValue::intptr_constant(exact_log2(element_bytes)));
+ offset = CreateAdd(
+ LLVMValue::intptr_constant(in_bytes(base_offset)), offset);
+
+ return CreateIntToPtr(
+ CreateAdd(CreatePtrToInt(arrayoop, SharkType::intptr_type()), offset),
+ PointerType::getUnqual(element_type),
+ name);
+}
+
+Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
+ BasicType basic_type,
+ ByteSize base_offset,
+ Value* index,
+ const char* name) {
+ return CreateArrayAddress(
+ arrayoop,
+ SharkType::to_arrayType(basic_type),
+ type2aelembytes(basic_type),
+ base_offset, index, name);
+}
+
+Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
+ BasicType basic_type,
+ Value* index,
+ const char* name) {
+ return CreateArrayAddress(
+ arrayoop, basic_type,
+ in_ByteSize(arrayOopDesc::base_offset_in_bytes(basic_type)),
+ index, name);
+}
+
+// Helpers for creating intrinsics and external functions.
+
+const Type* SharkBuilder::make_type(char type, bool void_ok) {
+ switch (type) {
+ // Primitive types
+ case 'c':
+ return SharkType::jbyte_type();
+ case 'i':
+ return SharkType::jint_type();
+ case 'l':
+ return SharkType::jlong_type();
+ case 'x':
+ return SharkType::intptr_type();
+ case 'f':
+ return SharkType::jfloat_type();
+ case 'd':
+ return SharkType::jdouble_type();
+
+ // Pointers to primitive types
+ case 'C':
+ case 'I':
+ case 'L':
+ case 'X':
+ case 'F':
+ case 'D':
+ return PointerType::getUnqual(make_type(tolower(type), false));
+
+ // VM objects
+ case 'T':
+ return SharkType::thread_type();
+ case 'M':
+ return PointerType::getUnqual(SharkType::monitor_type());
+ case 'O':
+ return SharkType::oop_type();
+
+ // Miscellaneous
+ case 'v':
+ assert(void_ok, "should be");
+ return SharkType::void_type();
+ case '1':
+ return SharkType::bit_type();
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+const FunctionType* SharkBuilder::make_ftype(const char* params,
+ const char* ret) {
+ std::vector<const Type*> param_types;
+ for (const char* c = params; *c; c++)
+ param_types.push_back(make_type(*c, false));
+
+ assert(strlen(ret) == 1, "should be");
+ const Type *return_type = make_type(*ret, true);
+
+ return FunctionType::get(return_type, param_types, false);
+}
+
+// Create an object representing an intrinsic or external function by
+// referencing the symbol by name. This is the LLVM-style approach,
+// but it cannot be used on functions within libjvm.so because its symbols
+// are not exported. Note that you cannot make this work simply by
+// exporting the symbols, as some symbols have the same names as
+// symbols in the standard libraries (eg, atan2, fabs) and would
+// obscure them were they visible.
+Value* SharkBuilder::make_function(const char* name,
+ const char* params,
+ const char* ret) {
+ return SharkContext::current().get_external(name, make_ftype(params, ret));
+}
+
+// Create an object representing an external function by inlining a
+// function pointer in the code. This is not the LLVM way, but it's
+// the only way to access functions in libjvm.so, and functions like
+// __kernel_dmb on ARM, which are accessed via absolute addresses.
+Value* SharkBuilder::make_function(address func,
+ const char* params,
+ const char* ret) {
+ return CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) func),
+ PointerType::getUnqual(make_ftype(params, ret)));
+}
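+
+// For example, make_ftype("TIi", "i") describes jint (*)(thread, jint*, jint):
+// 'T' is the current thread, 'I' a pointer to jint and 'i' a jint, which is
+// the shape that find_exception_handler() below requests with "TIi" and "i".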
+
+// VM calls
+
+Value* SharkBuilder::find_exception_handler() {
+ return make_function(
+ (address) SharkRuntime::find_exception_handler, "TIi", "i");
+}
+
+Value* SharkBuilder::monitorenter() {
+ return make_function((address) SharkRuntime::monitorenter, "TM", "v");
+}
+
+Value* SharkBuilder::monitorexit() {
+ return make_function((address) SharkRuntime::monitorexit, "TM", "v");
+}
+
+Value* SharkBuilder::new_instance() {
+ return make_function((address) SharkRuntime::new_instance, "Ti", "v");
+}
+
+Value* SharkBuilder::newarray() {
+ return make_function((address) SharkRuntime::newarray, "Tii", "v");
+}
+
+Value* SharkBuilder::anewarray() {
+ return make_function((address) SharkRuntime::anewarray, "Tii", "v");
+}
+
+Value* SharkBuilder::multianewarray() {
+ return make_function((address) SharkRuntime::multianewarray, "TiiI", "v");
+}
+
+Value* SharkBuilder::register_finalizer() {
+ return make_function((address) SharkRuntime::register_finalizer, "TO", "v");
+}
+
+Value* SharkBuilder::safepoint() {
+ return make_function((address) SafepointSynchronize::block, "T", "v");
+}
+
+Value* SharkBuilder::throw_ArithmeticException() {
+ return make_function(
+ (address) SharkRuntime::throw_ArithmeticException, "TCi", "v");
+}
+
+Value* SharkBuilder::throw_ArrayIndexOutOfBoundsException() {
+ return make_function(
+ (address) SharkRuntime::throw_ArrayIndexOutOfBoundsException, "TCii", "v");
+}
+
+Value* SharkBuilder::throw_ClassCastException() {
+ return make_function(
+ (address) SharkRuntime::throw_ClassCastException, "TCi", "v");
+}
+
+Value* SharkBuilder::throw_NullPointerException() {
+ return make_function(
+ (address) SharkRuntime::throw_NullPointerException, "TCi", "v");
+}
+
+// High-level non-VM calls
+
+Value* SharkBuilder::f2i() {
+ return make_function((address) SharedRuntime::f2i, "f", "i");
+}
+
+Value* SharkBuilder::f2l() {
+ return make_function((address) SharedRuntime::f2l, "f", "l");
+}
+
+Value* SharkBuilder::d2i() {
+ return make_function((address) SharedRuntime::d2i, "d", "i");
+}
+
+Value* SharkBuilder::d2l() {
+ return make_function((address) SharedRuntime::d2l, "d", "l");
+}
+
+Value* SharkBuilder::is_subtype_of() {
+ return make_function((address) SharkRuntime::is_subtype_of, "OO", "c");
+}
+
+Value* SharkBuilder::current_time_millis() {
+ return make_function((address) os::javaTimeMillis, "", "l");
+}
+
+Value* SharkBuilder::sin() {
+ return make_function("llvm.sin.f64", "d", "d");
+}
+
+Value* SharkBuilder::cos() {
+ return make_function("llvm.cos.f64", "d", "d");
+}
+
+Value* SharkBuilder::tan() {
+ return make_function((address) ::tan, "d", "d");
+}
+
+Value* SharkBuilder::atan2() {
+ return make_function((address) ::atan2, "dd", "d");
+}
+
+Value* SharkBuilder::sqrt() {
+ return make_function("llvm.sqrt.f64", "d", "d");
+}
+
+Value* SharkBuilder::log() {
+ return make_function("llvm.log.f64", "d", "d");
+}
+
+Value* SharkBuilder::log10() {
+ return make_function("llvm.log10.f64", "d", "d");
+}
+
+Value* SharkBuilder::pow() {
+ return make_function("llvm.pow.f64", "dd", "d");
+}
+
+Value* SharkBuilder::exp() {
+ return make_function("llvm.exp.f64", "d", "d");
+}
+
+Value* SharkBuilder::fabs() {
+ return make_function((address) ::fabs, "d", "d");
+}
+
+Value* SharkBuilder::unsafe_field_offset_to_byte_offset() {
+ extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
+ return make_function((address) Unsafe_field_offset_to_byte_offset, "l", "l");
+}
+
+Value* SharkBuilder::osr_migration_end() {
+ return make_function((address) SharedRuntime::OSR_migration_end, "C", "v");
+}
+
+// Semi-VM calls
+
+Value* SharkBuilder::throw_StackOverflowError() {
+ return make_function((address) ZeroStack::handle_overflow, "T", "v");
+}
+
+Value* SharkBuilder::uncommon_trap() {
+ return make_function((address) SharkRuntime::uncommon_trap, "Ti", "i");
+}
+
+Value* SharkBuilder::deoptimized_entry_point() {
+ return make_function((address) CppInterpreter::main_loop, "iT", "v");
+}
+
+// Native-Java transition
+
+Value* SharkBuilder::check_special_condition_for_native_trans() {
+ return make_function(
+ (address) JavaThread::check_special_condition_for_native_trans,
+ "T", "v");
+}
+
+// Low-level non-VM calls
+
+// The ARM-specific code here is to work around unimplemented
+// atomic exchange and memory barrier intrinsics in LLVM.
+//
+// Delegating to external functions for these would normally
+// incur a speed penalty, but Linux on ARM is a special case
+// in that atomic operations on that platform are handled by
+// external functions anyway. It would be *preferable* for
+// the calls to be hidden away in LLVM, but it's not hurting
+// performance so having the calls here is acceptable.
+//
+// If you are building Shark on a platform without atomic
+// exchange and/or memory barrier intrinsics then it is only
+// acceptable to mimic this approach if your platform cannot
+// perform these operations without delegating to a function.
+
+#ifdef ARM
+static jint zero_cmpxchg_int(volatile jint *ptr, jint oldval, jint newval) {
+ return Atomic::cmpxchg(newval, ptr, oldval);
+}
+#endif // ARM
+
+Value* SharkBuilder::cmpxchg_int() {
+ return make_function(
+#ifdef ARM
+ (address) zero_cmpxchg_int,
+#else
+ "llvm.atomic.cmp.swap.i32.p0i32",
+#endif // ARM
+ "Iii", "i");
+}
+
+#ifdef ARM
+static intptr_t zero_cmpxchg_ptr(volatile intptr_t* ptr,
+ intptr_t oldval,
+ intptr_t newval) {
+ return Atomic::cmpxchg_ptr(newval, ptr, oldval);
+}
+#endif // ARM
+
+Value* SharkBuilder::cmpxchg_ptr() {
+ return make_function(
+#ifdef ARM
+ (address) zero_cmpxchg_ptr,
+#else
+ "llvm.atomic.cmp.swap.i" LP64_ONLY("64") NOT_LP64("32") ".p0i" LP64_ONLY("64") NOT_LP64("32"),
+#endif // ARM
+ "Xxx", "x");
+}
+
+Value* SharkBuilder::frame_address() {
+ return make_function("llvm.frameaddress", "i", "C");
+}
+
+Value* SharkBuilder::memory_barrier() {
+ return make_function(
+#ifdef ARM
+ (address) 0xffff0fa0, // __kernel_dmb
+#else
+ "llvm.memory.barrier",
+#endif // ARM
+ "11111", "v");
+}
+
+Value* SharkBuilder::memset() {
+#if SHARK_LLVM_VERSION >= 28
+  // LLVM 2.8 (r100304) added a fifth isVolatile argument to the
+  // memset intrinsic
+ return make_function("llvm.memset.i32", "Cciii", "v");
+#else
+ return make_function("llvm.memset.i32", "Ccii", "v");
+#endif
+}
+
+Value* SharkBuilder::unimplemented() {
+ return make_function((address) report_unimplemented, "Ci", "v");
+}
+
+Value* SharkBuilder::should_not_reach_here() {
+ return make_function((address) report_should_not_reach_here, "Ci", "v");
+}
+
+Value* SharkBuilder::dump() {
+ return make_function((address) SharkRuntime::dump, "Cx", "v");
+}
+
+// Public interface to low-level non-VM calls
+
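+// Note that CreateCmpxchgInt and CreateCmpxchgPtr take their arguments
+// in HotSpot's Atomic::cmpxchg order (exchange_value, dest,
+// compare_value) and reorder them into the (ptr, compare, exchange)
+// order used by the compare-and-swap helpers above.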
+CallInst* SharkBuilder::CreateCmpxchgInt(Value* exchange_value,
+ Value* dst,
+ Value* compare_value) {
+ return CreateCall3(cmpxchg_int(), dst, compare_value, exchange_value);
+}
+
+CallInst* SharkBuilder::CreateCmpxchgPtr(Value* exchange_value,
+ Value* dst,
+ Value* compare_value) {
+ return CreateCall3(cmpxchg_ptr(), dst, compare_value, exchange_value);
+}
+
+CallInst* SharkBuilder::CreateGetFrameAddress() {
+ return CreateCall(frame_address(), LLVMValue::jint_constant(0));
+}
+
+CallInst *SharkBuilder::CreateMemoryBarrier(int flags) {
+ Value *args[] = {
+ LLVMValue::bit_constant((flags & BARRIER_LOADLOAD) ? 1 : 0),
+ LLVMValue::bit_constant((flags & BARRIER_LOADSTORE) ? 1 : 0),
+ LLVMValue::bit_constant((flags & BARRIER_STORELOAD) ? 1 : 0),
+ LLVMValue::bit_constant((flags & BARRIER_STORESTORE) ? 1 : 0),
+ LLVMValue::bit_constant(1)};
+
+ return CreateCall(memory_barrier(), args, args + 5);
+}
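+
+// Usage sketch (illustrative): a StoreLoad barrier would be requested with
+//   builder->CreateMemoryBarrier(SharkBuilder::BARRIER_STORELOAD);
+// which sets only the third of the four ordering bits passed to
+// llvm.memory.barrier above (the fifth argument is always passed as 1).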
+
+CallInst* SharkBuilder::CreateMemset(Value* dst,
+ Value* value,
+ Value* len,
+ Value* align) {
+#if SHARK_LLVM_VERSION >= 28
+ return CreateCall5(memset(), dst, value, len, align,
+ LLVMValue::jint_constant(0));
+#else
+ return CreateCall4(memset(), dst, value, len, align);
+#endif
+}
+
+CallInst* SharkBuilder::CreateUnimplemented(const char* file, int line) {
+ return CreateCall2(
+ unimplemented(),
+ CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) file),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jint_constant(line));
+}
+
+CallInst* SharkBuilder::CreateShouldNotReachHere(const char* file, int line) {
+ return CreateCall2(
+ should_not_reach_here(),
+ CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) file),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jint_constant(line));
+}
+
+#ifndef PRODUCT
+CallInst* SharkBuilder::CreateDump(Value* value) {
+ const char *name;
+ if (value->hasName())
+ // XXX this leaks, but it's only debug code
+ name = strdup(value->getName().str().c_str());
+ else
+ name = "unnamed_value";
+
+ if (isa<PointerType>(value->getType()))
+ value = CreatePtrToInt(value, SharkType::intptr_type());
+ else if (value->getType()->
+#if SHARK_LLVM_VERSION >= 27
+ isIntegerTy()
+#else
+ isInteger()
+#endif
+ )
+ value = CreateIntCast(value, SharkType::intptr_type(), false);
+ else
+ Unimplemented();
+
+ return CreateCall2(
+ dump(),
+ CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) name),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ value);
+}
+#endif // PRODUCT
+
+// HotSpot memory barriers
+
+void SharkBuilder::CreateUpdateBarrierSet(BarrierSet* bs, Value* field) {
+ if (bs->kind() != BarrierSet::CardTableModRef)
+ Unimplemented();
+
+ CreateStore(
+ LLVMValue::jbyte_constant(CardTableModRefBS::dirty_card),
+ CreateIntToPtr(
+ CreateAdd(
+ LLVMValue::intptr_constant(
+ (intptr_t) ((CardTableModRefBS *) bs)->byte_map_base),
+ CreateLShr(
+ CreatePtrToInt(field, SharkType::intptr_type()),
+ LLVMValue::intptr_constant(CardTableModRefBS::card_shift))),
+ PointerType::getUnqual(SharkType::jbyte_type())));
+}
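+
+// Roughly equivalent C sketch of the store emitted above (illustrative):
+//   jbyte *byte_map_base = ((CardTableModRefBS *) bs)->byte_map_base;
+//   byte_map_base[(uintptr_t) field >> CardTableModRefBS::card_shift] =
+//     CardTableModRefBS::dirty_card;
+// where "field" is the address of the oop field that was just written.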
+
+// Helpers for accessing the code buffer
+
+Value* SharkBuilder::code_buffer_address(int offset) {
+ return CreateAdd(
+ code_buffer()->base_pc(),
+ LLVMValue::intptr_constant(offset));
+}
+
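+// Note: oops are not inlined into the IR as immediate constants; they are
+// written into the code buffer (see SharkCodeBuffer::inline_oop) and loaded
+// relative to base_pc at runtime, because the buffer is relocated before
+// the method is installed.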
+Value* SharkBuilder::CreateInlineOop(jobject object, const char* name) {
+ return CreateLoad(
+ CreateIntToPtr(
+ code_buffer_address(code_buffer()->inline_oop(object)),
+ PointerType::getUnqual(SharkType::oop_type())),
+ name);
+}
+
+Value* SharkBuilder::CreateInlineData(void* data,
+ size_t size,
+ const Type* type,
+ const char* name) {
+ return CreateIntToPtr(
+ code_buffer_address(code_buffer()->inline_data(data, size)),
+ type,
+ name);
+}
+
+// Helpers for creating basic blocks.
+
+BasicBlock* SharkBuilder::GetBlockInsertionPoint() const {
+ BasicBlock *cur = GetInsertBlock();
+
+ // BasicBlock::Create takes an insertBefore argument, so
+ // we need to find the block _after_ the current block
+ Function::iterator iter = cur->getParent()->begin();
+ Function::iterator end = cur->getParent()->end();
+ while (iter != end) {
+ iter++;
+ if (&*iter == cur) {
+ iter++;
+ break;
+ }
+ }
+
+ if (iter == end)
+ return NULL;
+ else
+ return iter;
+}
+
+BasicBlock* SharkBuilder::CreateBlock(BasicBlock* ip, const char* name) const {
+ return BasicBlock::Create(
+ SharkContext::current(), name, GetInsertBlock()->getParent(), ip);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkBuilder.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkBuilder : public llvm::IRBuilder<> {
+ friend class SharkCompileInvariants;
+
+ public:
+ SharkBuilder(SharkCodeBuffer* code_buffer);
+
+ // The code buffer we are building into.
+ private:
+ SharkCodeBuffer* _code_buffer;
+
+ protected:
+ SharkCodeBuffer* code_buffer() const {
+ return _code_buffer;
+ }
+
+ // Helpers for accessing structures.
+ public:
+ llvm::Value* CreateAddressOfStructEntry(llvm::Value* base,
+ ByteSize offset,
+ const llvm::Type* type,
+ const char *name = "");
+ llvm::LoadInst* CreateValueOfStructEntry(llvm::Value* base,
+ ByteSize offset,
+ const llvm::Type* type,
+ const char *name = "");
+
+ // Helpers for accessing arrays.
+ public:
+ llvm::LoadInst* CreateArrayLength(llvm::Value* arrayoop);
+ llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
+ const llvm::Type* element_type,
+ int element_bytes,
+ ByteSize base_offset,
+ llvm::Value* index,
+ const char* name = "");
+ llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
+ BasicType basic_type,
+ ByteSize base_offset,
+ llvm::Value* index,
+ const char* name = "");
+ llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
+ BasicType basic_type,
+ llvm::Value* index,
+ const char* name = "");
+
+ // Helpers for creating intrinsics and external functions.
+ private:
+ static const llvm::Type* make_type(char type, bool void_ok);
+ static const llvm::FunctionType* make_ftype(const char* params,
+ const char* ret);
+ llvm::Value* make_function(const char* name,
+ const char* params,
+ const char* ret);
+ llvm::Value* make_function(address func,
+ const char* params,
+ const char* ret);
+
+ // Intrinsics and external functions, part 1: VM calls.
+ // These are functions declared with JRT_ENTRY and JRT_EXIT,
+ // macros which flip the thread from _thread_in_Java to
+ // _thread_in_vm and back. VM calls always safepoint, and can
+  // therefore throw exceptions.  VM calls require a lot of setup and
+  // teardown, and must be made via SharkTopLevelBlock::call_vm.
+ public:
+ llvm::Value* find_exception_handler();
+ llvm::Value* monitorenter();
+ llvm::Value* monitorexit();
+ llvm::Value* new_instance();
+ llvm::Value* newarray();
+ llvm::Value* anewarray();
+ llvm::Value* multianewarray();
+ llvm::Value* register_finalizer();
+ llvm::Value* safepoint();
+ llvm::Value* throw_ArithmeticException();
+ llvm::Value* throw_ArrayIndexOutOfBoundsException();
+ llvm::Value* throw_ClassCastException();
+ llvm::Value* throw_NullPointerException();
+
+ // Intrinsics and external functions, part 2: High-level non-VM calls.
+ // These are called like normal functions. The stack is not set
+ // up for walking so they must not safepoint or throw exceptions,
+ // or call anything that might.
+ public:
+ llvm::Value* f2i();
+ llvm::Value* f2l();
+ llvm::Value* d2i();
+ llvm::Value* d2l();
+ llvm::Value* is_subtype_of();
+ llvm::Value* current_time_millis();
+ llvm::Value* sin();
+ llvm::Value* cos();
+ llvm::Value* tan();
+ llvm::Value* atan2();
+ llvm::Value* sqrt();
+ llvm::Value* log();
+ llvm::Value* log10();
+ llvm::Value* pow();
+ llvm::Value* exp();
+ llvm::Value* fabs();
+ llvm::Value* unsafe_field_offset_to_byte_offset();
+ llvm::Value* osr_migration_end();
+
+ // Intrinsics and external functions, part 3: semi-VM calls.
+ // These are special cases that do VM call stuff but are invoked
+ // as though they were normal calls. This is acceptable so long
+  // as the method that calls them returns to its caller immediately
+  // after the semi-VM call returns.
+ public:
+ llvm::Value* throw_StackOverflowError();
+ llvm::Value* uncommon_trap();
+ llvm::Value* deoptimized_entry_point();
+
+ // Intrinsics and external functions, part 4: Native-Java transition.
+ // This is a special case in that it is invoked during a thread
+ // state transition. The stack must be set up for walking, and it
+ // may throw exceptions, but the state is _thread_in_native_trans.
+ public:
+ llvm::Value* check_special_condition_for_native_trans();
+
+ // Intrinsics and external functions, part 5: Low-level non-VM calls.
+ // These have the same caveats as the high-level non-VM calls
+ // above. They are not accessed directly; rather, you should
+ // access them via the various Create* methods below.
+ private:
+ llvm::Value* cmpxchg_int();
+ llvm::Value* cmpxchg_ptr();
+ llvm::Value* frame_address();
+ llvm::Value* memory_barrier();
+ llvm::Value* memset();
+ llvm::Value* unimplemented();
+ llvm::Value* should_not_reach_here();
+ llvm::Value* dump();
+
+ // Public interface to low-level non-VM calls.
+ public:
+ llvm::CallInst* CreateCmpxchgInt(llvm::Value* exchange_value,
+ llvm::Value* dst,
+ llvm::Value* compare_value);
+ llvm::CallInst* CreateCmpxchgPtr(llvm::Value* exchange_value,
+ llvm::Value* dst,
+ llvm::Value* compare_value);
+ llvm::CallInst* CreateGetFrameAddress();
+ llvm::CallInst* CreateMemoryBarrier(int flags);
+ llvm::CallInst* CreateMemset(llvm::Value* dst,
+ llvm::Value* value,
+ llvm::Value* len,
+ llvm::Value* align);
+ llvm::CallInst* CreateUnimplemented(const char* file, int line);
+ llvm::CallInst* CreateShouldNotReachHere(const char* file, int line);
+ NOT_PRODUCT(llvm::CallInst* CreateDump(llvm::Value* value));
+
+ // Flags for CreateMemoryBarrier.
+ public:
+ enum BarrierFlags {
+ BARRIER_LOADLOAD = 1,
+ BARRIER_LOADSTORE = 2,
+ BARRIER_STORELOAD = 4,
+ BARRIER_STORESTORE = 8
+ };
+
+ // HotSpot memory barriers
+ public:
+ void CreateUpdateBarrierSet(BarrierSet* bs, llvm::Value* field);
+
+ // Helpers for accessing the code buffer.
+ public:
+ llvm::Value* code_buffer_address(int offset);
+ llvm::Value* CreateInlineOop(jobject object, const char* name = "");
+ llvm::Value* CreateInlineOop(ciObject* object, const char* name = "") {
+ return CreateInlineOop(object->constant_encoding(), name);
+ }
+ llvm::Value* CreateInlineData(void* data,
+ size_t size,
+ const llvm::Type* type,
+ const char* name = "");
+
+ // Helpers for creating basic blocks.
+ // NB don't use unless SharkFunction::CreateBlock is unavailable.
+ // XXX these are hacky and should be removed.
+ public:
+ llvm::BasicBlock* GetBlockInsertionPoint() const;
+ llvm::BasicBlock* CreateBlock(llvm::BasicBlock* ip,
+ const char* name="") const;
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkCacheDecache.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkCacheDecache.cpp.incl"
+
+using namespace llvm;
+
+void SharkDecacher::start_frame() {
+ // Start recording the debug information
+ _pc_offset = code_buffer()->create_unique_offset();
+ _oopmap = new OopMap(
+ oopmap_slot_munge(stack()->oopmap_frame_size()),
+ oopmap_slot_munge(arg_size()));
+ debug_info()->add_safepoint(pc_offset(), oopmap());
+}
+
+void SharkDecacher::start_stack(int stack_depth) {
+ // Create the array we'll record our stack slots in
+ _exparray = new GrowableArray<ScopeValue*>(stack_depth);
+
+ // Set the stack pointer
+ stack()->CreateStoreStackPointer(
+ builder()->CreatePtrToInt(
+ stack()->slot_addr(
+ stack()->stack_slots_offset() + max_stack() - stack_depth),
+ SharkType::intptr_type()));
+}
+
+void SharkDecacher::process_stack_slot(int index,
+ SharkValue** addr,
+ int offset) {
+ SharkValue *value = *addr;
+
+ // Write the value to the frame if necessary
+ if (stack_slot_needs_write(index, value)) {
+ write_value_to_frame(
+ SharkType::to_stackType(value->basic_type()),
+ value->generic_value(),
+ adjusted_offset(value, offset));
+ }
+
+ // Record the value in the oopmap if necessary
+ if (stack_slot_needs_oopmap(index, value)) {
+ oopmap()->set_oop(slot2reg(offset));
+ }
+
+ // Record the value in the debuginfo if necessary
+ if (stack_slot_needs_debuginfo(index, value)) {
+ exparray()->append(slot2lv(offset, stack_location_type(index, addr)));
+ }
+}
+
+void SharkDecacher::start_monitors(int num_monitors) {
+ // Create the array we'll record our monitors in
+ _monarray = new GrowableArray<MonitorValue*>(num_monitors);
+}
+
+void SharkDecacher::process_monitor(int index, int box_offset, int obj_offset) {
+ oopmap()->set_oop(slot2reg(obj_offset));
+
+ monarray()->append(new MonitorValue(
+ slot2lv (obj_offset, Location::oop),
+ slot2loc(box_offset, Location::normal)));
+}
+
+void SharkDecacher::process_oop_tmp_slot(Value** value, int offset) {
+ // Decache the temporary oop slot
+ if (*value) {
+ write_value_to_frame(
+ SharkType::oop_type(),
+ *value,
+ offset);
+
+ oopmap()->set_oop(slot2reg(offset));
+ }
+}
+
+void SharkDecacher::process_method_slot(Value** value, int offset) {
+ // Decache the method pointer
+ write_value_to_frame(
+ SharkType::methodOop_type(),
+ *value,
+ offset);
+
+ oopmap()->set_oop(slot2reg(offset));
+}
+
+void SharkDecacher::process_pc_slot(int offset) {
+ // Record the PC
+ builder()->CreateStore(
+ builder()->code_buffer_address(pc_offset()),
+ stack()->slot_addr(offset));
+}
+
+void SharkDecacher::start_locals() {
+ // Create the array we'll record our local variables in
+  _locarray = new GrowableArray<ScopeValue*>(max_locals());
+}
+
+void SharkDecacher::process_local_slot(int index,
+ SharkValue** addr,
+ int offset) {
+ SharkValue *value = *addr;
+
+ // Write the value to the frame if necessary
+ if (local_slot_needs_write(index, value)) {
+ write_value_to_frame(
+ SharkType::to_stackType(value->basic_type()),
+ value->generic_value(),
+ adjusted_offset(value, offset));
+ }
+
+ // Record the value in the oopmap if necessary
+ if (local_slot_needs_oopmap(index, value)) {
+ oopmap()->set_oop(slot2reg(offset));
+ }
+
+ // Record the value in the debuginfo if necessary
+ if (local_slot_needs_debuginfo(index, value)) {
+ locarray()->append(slot2lv(offset, local_location_type(index, addr)));
+ }
+}
+
+void SharkDecacher::end_frame() {
+ // Record the scope
+ debug_info()->describe_scope(
+ pc_offset(),
+ target(),
+ bci(),
+ true,
+ false,
+ false,
+ debug_info()->create_scope_values(locarray()),
+ debug_info()->create_scope_values(exparray()),
+ debug_info()->create_monitor_values(monarray()));
+
+ // Finish recording the debug information
+ debug_info()->end_safepoint(pc_offset());
+}
+
+void SharkCacher::process_stack_slot(int index,
+ SharkValue** addr,
+ int offset) {
+ SharkValue *value = *addr;
+
+ // Read the value from the frame if necessary
+ if (stack_slot_needs_read(index, value)) {
+ *addr = SharkValue::create_generic(
+ value->type(),
+ read_value_from_frame(
+ SharkType::to_stackType(value->basic_type()),
+ adjusted_offset(value, offset)),
+ value->zero_checked());
+ }
+}
+
+void SharkOSREntryCacher::process_monitor(int index,
+ int box_offset,
+ int obj_offset) {
+ // Copy the monitor from the OSR buffer to the frame
+ int src_offset = max_locals() + index * 2;
+ builder()->CreateStore(
+ builder()->CreateLoad(
+ CreateAddressOfOSRBufEntry(src_offset, SharkType::intptr_type())),
+ stack()->slot_addr(box_offset, SharkType::intptr_type()));
+ builder()->CreateStore(
+ builder()->CreateLoad(
+ CreateAddressOfOSRBufEntry(src_offset + 1, SharkType::oop_type())),
+ stack()->slot_addr(obj_offset, SharkType::oop_type()));
+}
+
+void SharkCacher::process_oop_tmp_slot(Value** value, int offset) {
+ // Cache the temporary oop
+ if (*value)
+ *value = read_value_from_frame(SharkType::oop_type(), offset);
+}
+
+void SharkCacher::process_method_slot(Value** value, int offset) {
+ // Cache the method pointer
+ *value = read_value_from_frame(SharkType::methodOop_type(), offset);
+}
+
+void SharkFunctionEntryCacher::process_method_slot(Value** value, int offset) {
+ // "Cache" the method pointer
+ *value = method();
+}
+
+void SharkCacher::process_local_slot(int index,
+ SharkValue** addr,
+ int offset) {
+ SharkValue *value = *addr;
+
+ // Read the value from the frame if necessary
+ if (local_slot_needs_read(index, value)) {
+ *addr = SharkValue::create_generic(
+ value->type(),
+ read_value_from_frame(
+ SharkType::to_stackType(value->basic_type()),
+ adjusted_offset(value, offset)),
+ value->zero_checked());
+ }
+}
+
+Value* SharkOSREntryCacher::CreateAddressOfOSRBufEntry(int offset,
+ const Type* type) {
+ Value *result = builder()->CreateStructGEP(osr_buf(), offset);
+ if (type != SharkType::intptr_type())
+ result = builder()->CreateBitCast(result, PointerType::getUnqual(type));
+ return result;
+}
+
+void SharkOSREntryCacher::process_local_slot(int index,
+ SharkValue** addr,
+ int offset) {
+ SharkValue *value = *addr;
+
+ // Read the value from the OSR buffer if necessary
+ if (local_slot_needs_read(index, value)) {
+ *addr = SharkValue::create_generic(
+ value->type(),
+ builder()->CreateLoad(
+ CreateAddressOfOSRBufEntry(
+ adjusted_offset(value, max_locals() - 1 - index),
+ SharkType::to_stackType(value->basic_type()))),
+ value->zero_checked());
+ }
+}
+
+void SharkDecacher::write_value_to_frame(const Type* type,
+ Value* value,
+ int offset) {
+ builder()->CreateStore(value, stack()->slot_addr(offset, type));
+}
+
+Value* SharkCacher::read_value_from_frame(const Type* type, int offset) {
+ return builder()->CreateLoad(stack()->slot_addr(offset, type));
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkCacheDecache.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Class hierarchy:
+// - SharkStateScanner
+//   - SharkCacherDecacher
+//     - SharkDecacher
+//       - SharkJavaCallDecacher
+//       - SharkVMCallDecacher
+//       - SharkTrapDecacher
+//     - SharkCacher
+//       - SharkJavaCallCacher
+//       - SharkVMCallCacher
+//       - SharkFunctionEntryCacher
+//         - SharkNormalEntryCacher
+//         - SharkOSREntryCacher
+
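+// Decachers write cached values back into the frame (and record oopmap
+// and debug information) before points at which the frame must be
+// walkable, such as Java calls, VM calls and traps.  Cachers do the
+// reverse, reloading values from the frame afterwards; the entry cachers
+// set up the initial state on normal and OSR entry.
+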
+class SharkCacherDecacher : public SharkStateScanner {
+ protected:
+ SharkCacherDecacher(SharkFunction* function)
+ : SharkStateScanner(function) {}
+
+ // Helper
+ protected:
+ static int adjusted_offset(SharkValue* value, int offset) {
+ if (value->is_two_word())
+ offset--;
+ return offset;
+ }
+};
+
+class SharkDecacher : public SharkCacherDecacher {
+ protected:
+ SharkDecacher(SharkFunction* function, int bci)
+ : SharkCacherDecacher(function), _bci(bci) {}
+
+ private:
+ int _bci;
+
+ protected:
+ int bci() const {
+ return _bci;
+ }
+
+ private:
+ int _pc_offset;
+ OopMap* _oopmap;
+ GrowableArray<ScopeValue*>* _exparray;
+ GrowableArray<MonitorValue*>* _monarray;
+ GrowableArray<ScopeValue*>* _locarray;
+
+ private:
+ int pc_offset() const {
+ return _pc_offset;
+ }
+ OopMap* oopmap() const {
+ return _oopmap;
+ }
+ GrowableArray<ScopeValue*>* exparray() const {
+ return _exparray;
+ }
+ GrowableArray<MonitorValue*>* monarray() const {
+ return _monarray;
+ }
+ GrowableArray<ScopeValue*>* locarray() const {
+ return _locarray;
+ }
+
+ // Callbacks
+ protected:
+ void start_frame();
+
+ void start_stack(int stack_depth);
+ void process_stack_slot(int index, SharkValue** value, int offset);
+
+ void start_monitors(int num_monitors);
+ void process_monitor(int index, int box_offset, int obj_offset);
+
+ void process_oop_tmp_slot(llvm::Value** value, int offset);
+ void process_method_slot(llvm::Value** value, int offset);
+ void process_pc_slot(int offset);
+
+ void start_locals();
+ void process_local_slot(int index, SharkValue** value, int offset);
+
+ void end_frame();
+
+ // oopmap and debuginfo helpers
+ private:
+ static int oopmap_slot_munge(int offset) {
+ return SharkStack::oopmap_slot_munge(offset);
+ }
+ static VMReg slot2reg(int offset) {
+ return SharkStack::slot2reg(offset);
+ }
+ static Location slot2loc(int offset, Location::Type type) {
+ return Location::new_stk_loc(type, offset * wordSize);
+ }
+ static LocationValue* slot2lv(int offset, Location::Type type) {
+ return new LocationValue(slot2loc(offset, type));
+ }
+ static Location::Type location_type(SharkValue** addr, bool maybe_two_word) {
+ // low addresses this end
+ // Type 32-bit 64-bit
+ // ----------------------------------------------------
+ // stack[0] local[3] jobject oop oop
+ // stack[1] local[2] NULL normal lng
+ // stack[2] local[1] jlong normal invalid
+ // stack[3] local[0] jint normal normal
+ //
+ // high addresses this end
+
+ SharkValue *value = *addr;
+ if (value) {
+ if (value->is_jobject())
+ return Location::oop;
+#ifdef _LP64
+ if (value->is_two_word())
+ return Location::invalid;
+#endif // _LP64
+ return Location::normal;
+ }
+ else {
+ if (maybe_two_word) {
+ value = *(addr - 1);
+ if (value && value->is_two_word()) {
+#ifdef _LP64
+ if (value->is_jlong())
+ return Location::lng;
+ if (value->is_jdouble())
+ return Location::dbl;
+ ShouldNotReachHere();
+#else
+ return Location::normal;
+#endif // _LP64
+ }
+ }
+ return Location::invalid;
+ }
+ }
+
+ // Stack slot helpers
+ protected:
+ virtual bool stack_slot_needs_write(int index, SharkValue* value) = 0;
+ virtual bool stack_slot_needs_oopmap(int index, SharkValue* value) = 0;
+ virtual bool stack_slot_needs_debuginfo(int index, SharkValue* value) = 0;
+
+ static Location::Type stack_location_type(int index, SharkValue** addr) {
+ return location_type(addr, *addr == NULL);
+ }
+
+ // Local slot helpers
+ protected:
+ virtual bool local_slot_needs_write(int index, SharkValue* value) = 0;
+ virtual bool local_slot_needs_oopmap(int index, SharkValue* value) = 0;
+ virtual bool local_slot_needs_debuginfo(int index, SharkValue* value) = 0;
+
+ static Location::Type local_location_type(int index, SharkValue** addr) {
+ return location_type(addr, index > 0);
+ }
+
+ // Writer helper
+ protected:
+ void write_value_to_frame(const llvm::Type* type,
+ llvm::Value* value,
+ int offset);
+};
+
+class SharkJavaCallDecacher : public SharkDecacher {
+ public:
+ SharkJavaCallDecacher(SharkFunction* function, int bci, ciMethod* callee)
+ : SharkDecacher(function, bci), _callee(callee) {}
+
+ private:
+ ciMethod* _callee;
+
+ protected:
+ ciMethod* callee() const {
+ return _callee;
+ }
+
+ // Stack slot helpers
+ protected:
+ bool stack_slot_needs_write(int index, SharkValue* value) {
+ return value && (index < callee()->arg_size() || value->is_jobject());
+ }
+ bool stack_slot_needs_oopmap(int index, SharkValue* value) {
+ return value && value->is_jobject() && index >= callee()->arg_size();
+ }
+ bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
+ return index >= callee()->arg_size();
+ }
+
+ // Local slot helpers
+ protected:
+ bool local_slot_needs_write(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool local_slot_needs_oopmap(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool local_slot_needs_debuginfo(int index, SharkValue* value) {
+ return true;
+ }
+};
+
+class SharkVMCallDecacher : public SharkDecacher {
+ public:
+ SharkVMCallDecacher(SharkFunction* function, int bci)
+ : SharkDecacher(function, bci) {}
+
+ // Stack slot helpers
+ protected:
+ bool stack_slot_needs_write(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool stack_slot_needs_oopmap(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
+ return true;
+ }
+
+ // Local slot helpers
+ protected:
+ bool local_slot_needs_write(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool local_slot_needs_oopmap(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool local_slot_needs_debuginfo(int index, SharkValue* value) {
+ return true;
+ }
+};
+
+class SharkTrapDecacher : public SharkDecacher {
+ public:
+ SharkTrapDecacher(SharkFunction* function, int bci)
+ : SharkDecacher(function, bci) {}
+
+ // Stack slot helpers
+ protected:
+ bool stack_slot_needs_write(int index, SharkValue* value) {
+ return value != NULL;
+ }
+ bool stack_slot_needs_oopmap(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool stack_slot_needs_debuginfo(int index, SharkValue* value) {
+ return true;
+ }
+
+ // Local slot helpers
+ protected:
+ bool local_slot_needs_write(int index, SharkValue* value) {
+ return value != NULL;
+ }
+ bool local_slot_needs_oopmap(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+ bool local_slot_needs_debuginfo(int index, SharkValue* value) {
+ return true;
+ }
+};
+
+class SharkCacher : public SharkCacherDecacher {
+ protected:
+ SharkCacher(SharkFunction* function)
+ : SharkCacherDecacher(function) {}
+
+ // Callbacks
+ protected:
+ void process_stack_slot(int index, SharkValue** value, int offset);
+
+ void process_oop_tmp_slot(llvm::Value** value, int offset);
+ virtual void process_method_slot(llvm::Value** value, int offset);
+
+ virtual void process_local_slot(int index, SharkValue** value, int offset);
+
+ // Stack slot helper
+ protected:
+ virtual bool stack_slot_needs_read(int index, SharkValue* value) = 0;
+
+ // Local slot helper
+ protected:
+ virtual bool local_slot_needs_read(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+
+ // Writer helper
+ protected:
+ llvm::Value* read_value_from_frame(const llvm::Type* type, int offset);
+};
+
+class SharkJavaCallCacher : public SharkCacher {
+ public:
+ SharkJavaCallCacher(SharkFunction* function, ciMethod* callee)
+ : SharkCacher(function), _callee(callee) {}
+
+ private:
+ ciMethod* _callee;
+
+ protected:
+ ciMethod* callee() const {
+ return _callee;
+ }
+
+ // Stack slot helper
+ protected:
+ bool stack_slot_needs_read(int index, SharkValue* value) {
+ return value && (index < callee()->return_type()->size() ||
+ value->is_jobject());
+ }
+};
+
+class SharkVMCallCacher : public SharkCacher {
+ public:
+ SharkVMCallCacher(SharkFunction* function)
+ : SharkCacher(function) {}
+
+ // Stack slot helper
+ protected:
+ bool stack_slot_needs_read(int index, SharkValue* value) {
+ return value && value->is_jobject();
+ }
+};
+
+class SharkFunctionEntryCacher : public SharkCacher {
+ public:
+ SharkFunctionEntryCacher(SharkFunction* function, llvm::Value* method)
+ : SharkCacher(function), _method(method) {}
+
+ private:
+ llvm::Value* _method;
+
+ private:
+ llvm::Value* method() const {
+ return _method;
+ }
+
+ // Method slot callback
+ protected:
+ void process_method_slot(llvm::Value** value, int offset);
+
+ // Stack slot helper
+ protected:
+ bool stack_slot_needs_read(int index, SharkValue* value) {
+    ShouldNotReachHere(); // entry block shouldn't have stack
+    return false;         // unreachable; keeps the compiler happy
+ }
+
+ // Local slot helper
+ protected:
+ bool local_slot_needs_read(int index, SharkValue* value) {
+ return value != NULL;
+ }
+};
+
+class SharkNormalEntryCacher : public SharkFunctionEntryCacher {
+ public:
+ SharkNormalEntryCacher(SharkFunction* function, llvm::Value* method)
+ : SharkFunctionEntryCacher(function, method) {}
+};
+
+class SharkOSREntryCacher : public SharkFunctionEntryCacher {
+ public:
+ SharkOSREntryCacher(SharkFunction* function,
+ llvm::Value* method,
+ llvm::Value* osr_buf)
+ : SharkFunctionEntryCacher(function, method),
+ _osr_buf(
+ builder()->CreateBitCast(
+ osr_buf,
+ llvm::PointerType::getUnqual(
+ llvm::ArrayType::get(
+ SharkType::intptr_type(),
+ max_locals() + max_monitors() * 2)))) {}
+
+ private:
+ llvm::Value* _osr_buf;
+
+ private:
+ llvm::Value* osr_buf() const {
+ return _osr_buf;
+ }
+
+ // Callbacks
+ protected:
+ void process_monitor(int index, int box_offset, int obj_offset);
+ void process_local_slot(int index, SharkValue** value, int offset);
+
+ // Helper
+ private:
+ llvm::Value* CreateAddressOfOSRBufEntry(int offset, const llvm::Type* type);
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkCodeBuffer.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkCodeBuffer : public StackObj {
+ public:
+ SharkCodeBuffer(MacroAssembler* masm)
+ : _masm(masm), _base_pc(NULL) {}
+
+ private:
+ MacroAssembler* _masm;
+ llvm::Value* _base_pc;
+
+ private:
+ MacroAssembler* masm() const {
+ return _masm;
+ }
+
+ public:
+ llvm::Value* base_pc() const {
+ return _base_pc;
+ }
+ void set_base_pc(llvm::Value* base_pc) {
+ assert(_base_pc == NULL, "only do this once");
+ _base_pc = base_pc;
+ }
+
+ // Allocate some space in the buffer and return its address.
+ // This buffer will have been relocated by the time the method
+ // is installed, so you can't inline the result in code.
+ public:
+ void* malloc(size_t size) const {
+ masm()->align(BytesPerWord);
+ void *result = masm()->pc();
+ masm()->advance(size);
+ return result;
+ }
+
+ // Create a unique offset in the buffer.
+ public:
+ int create_unique_offset() const {
+ int offset = masm()->offset();
+ masm()->advance(1);
+ return offset;
+ }
+
+ // Inline an oop into the buffer and return its offset.
+ public:
+ int inline_oop(jobject object) const {
+ masm()->align(BytesPerWord);
+ int offset = masm()->offset();
+ masm()->store_oop(object);
+ return offset;
+ }
+
+ // Inline a block of non-oop data into the buffer and return its offset.
+ public:
+ int inline_data(void *src, size_t size) const {
+ masm()->align(BytesPerWord);
+ int offset = masm()->offset();
+ void *dst = masm()->pc();
+ masm()->advance(size);
+ memcpy(dst, src, size);
+ return offset;
+ }
+};
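+
+// Usage sketch (illustrative, based on callers elsewhere in this patch):
+// SharkCompiler::compile_method() allocates the method's SharkEntry with
+//
+//   SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
+//
+// while SharkBuilder::CreateInlineOop() turns the offset returned by
+// inline_oop() into a runtime load relative to base_pc().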
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkCompiler.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkCompiler.cpp.incl"
+
+#include <fnmatch.h>
+
+using namespace llvm;
+
+#if SHARK_LLVM_VERSION >= 27
+namespace {
+ cl::opt<std::string>
+ MCPU("mcpu");
+
+ cl::list<std::string>
+ MAttrs("mattr",
+ cl::CommaSeparated);
+}
+#endif
+
+SharkCompiler::SharkCompiler()
+ : AbstractCompiler() {
+ // Create the lock to protect the memory manager and execution engine
+ _execution_engine_lock = new Monitor(Mutex::leaf, "SharkExecutionEngineLock");
+ MutexLocker locker(execution_engine_lock());
+
+ // Make LLVM safe for multithreading
+ if (!llvm_start_multithreaded())
+ fatal("llvm_start_multithreaded() failed");
+
+ // Initialize the native target
+ InitializeNativeTarget();
+
+ // Create the two contexts which we'll use
+ _normal_context = new SharkContext("normal");
+ _native_context = new SharkContext("native");
+
+ // Create the memory manager
+ _memory_manager = new SharkMemoryManager();
+
+#if SHARK_LLVM_VERSION >= 27
+  // Fine-tune LLVM for the current host CPU.
+ StringMap<bool> Features;
+ bool gotCpuFeatures = llvm::sys::getHostCPUFeatures(Features);
+ std::string cpu("-mcpu=" + llvm::sys::getHostCPUName());
+
+ std::vector<const char*> args;
+ args.push_back(""); // program name
+ args.push_back(cpu.c_str());
+
+ std::string mattr("-mattr=");
+  if (gotCpuFeatures) {
+    for (StringMap<bool>::iterator I = Features.begin(),
+           E = Features.end(); I != E; ++I) {
+      if (I->second) {
+        std::string attr(I->first());
+        mattr += "+" + attr + ",";
+ }
+ }
+ args.push_back(mattr.c_str());
+ }
+
+ args.push_back(0); // terminator
+ cl::ParseCommandLineOptions(args.size() - 1, (char **) &args[0]);
+
+ // Create the JIT
+ std::string ErrorMsg;
+
+ EngineBuilder builder(_normal_context->module());
+ builder.setMCPU(MCPU);
+ builder.setMAttrs(MAttrs);
+ builder.setJITMemoryManager(memory_manager());
+ builder.setEngineKind(EngineKind::JIT);
+ builder.setErrorStr(&ErrorMsg);
+ _execution_engine = builder.create();
+
+ if (!execution_engine()) {
+ if (!ErrorMsg.empty())
+ printf("Error while creating Shark JIT: %s\n",ErrorMsg.c_str());
+ else
+ printf("Unknown error while creating Shark JIT\n");
+ exit(1);
+ }
+
+ execution_engine()->addModule(
+ _native_context->module());
+#else
+ _execution_engine = ExecutionEngine::createJIT(
+ _normal_context->module_provider(),
+ NULL, memory_manager(), CodeGenOpt::Default);
+ execution_engine()->addModuleProvider(
+ _native_context->module_provider());
+#endif
+
+ // All done
+ mark_initialized();
+}
+
+void SharkCompiler::initialize() {
+ ShouldNotCallThis();
+}
+
+void SharkCompiler::compile_method(ciEnv* env,
+ ciMethod* target,
+ int entry_bci) {
+ assert(is_initialized(), "should be");
+ ResourceMark rm;
+ const char *name = methodname(
+ target->holder()->name()->as_utf8(), target->name()->as_utf8());
+
+ // Do the typeflow analysis
+ ciTypeFlow *flow;
+ if (entry_bci == InvocationEntryBci)
+ flow = target->get_flow_analysis();
+ else
+ flow = target->get_osr_flow_analysis(entry_bci);
+ if (flow->failing())
+ return;
+ if (SharkPrintTypeflowOf != NULL) {
+ if (!fnmatch(SharkPrintTypeflowOf, name, 0))
+ flow->print_on(tty);
+ }
+
+ // Create the recorders
+ Arena arena;
+ env->set_oop_recorder(new OopRecorder(&arena));
+ OopMapSet oopmaps;
+ env->set_debug_info(new DebugInformationRecorder(env->oop_recorder()));
+ env->debug_info()->set_oopmaps(&oopmaps);
+ env->set_dependencies(new Dependencies(env));
+
+ // Create the code buffer and builder
+ CodeBuffer hscb("Shark", 256 * K, 64 * K);
+ hscb.initialize_oop_recorder(env->oop_recorder());
+ MacroAssembler *masm = new MacroAssembler(&hscb);
+ SharkCodeBuffer cb(masm);
+ SharkBuilder builder(&cb);
+
+ // Emit the entry point
+ SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
+
+ // Build the LLVM IR for the method
+ Function *function = SharkFunction::build(env, &builder, flow, name);
+
+ // Generate native code. It's unpleasant that we have to drop into
+ // the VM to do this -- it blocks safepoints -- but I can't see any
+ // other way to handle the locking.
+ {
+ ThreadInVMfromNative tiv(JavaThread::current());
+ generate_native_code(entry, function, name);
+ }
+
+ // Install the method into the VM
+ CodeOffsets offsets;
+ offsets.set_value(CodeOffsets::Deopt, 0);
+ offsets.set_value(CodeOffsets::Exceptions, 0);
+ offsets.set_value(CodeOffsets::Verified_Entry,
+ target->is_static() ? 0 : wordSize);
+
+ ExceptionHandlerTable handler_table;
+ ImplicitExceptionTable inc_table;
+
+ env->register_method(target,
+ entry_bci,
+ &offsets,
+ 0,
+ &hscb,
+ 0,
+ &oopmaps,
+ &handler_table,
+ &inc_table,
+ this,
+ env->comp_level(),
+ false,
+ false);
+}
+
+nmethod* SharkCompiler::generate_native_wrapper(MacroAssembler* masm,
+ methodHandle target,
+ BasicType* arg_types,
+ BasicType return_type) {
+ assert(is_initialized(), "should be");
+ ResourceMark rm;
+ const char *name = methodname(
+ target->klass_name()->as_utf8(), target->name()->as_utf8());
+
+ // Create the code buffer and builder
+ SharkCodeBuffer cb(masm);
+ SharkBuilder builder(&cb);
+
+ // Emit the entry point
+ SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry));
+
+ // Build the LLVM IR for the method
+ SharkNativeWrapper *wrapper = SharkNativeWrapper::build(
+ &builder, target, name, arg_types, return_type);
+
+ // Generate native code
+ generate_native_code(entry, wrapper->function(), name);
+
+ // Return the nmethod for installation in the VM
+ return nmethod::new_native_nmethod(target,
+ masm->code(),
+ 0,
+ 0,
+ wrapper->frame_size(),
+ wrapper->receiver_offset(),
+ wrapper->lock_offset(),
+ wrapper->oop_maps());
+}
+
+void SharkCompiler::generate_native_code(SharkEntry* entry,
+ Function* function,
+ const char* name) {
+ // Print the LLVM bitcode, if requested
+ if (SharkPrintBitcodeOf != NULL) {
+ if (!fnmatch(SharkPrintBitcodeOf, name, 0))
+ function->dump();
+ }
+
+ // Compile to native code
+ address code = NULL;
+ context()->add_function(function);
+ {
+ MutexLocker locker(execution_engine_lock());
+ free_queued_methods();
+
+ if (SharkPrintAsmOf != NULL) {
+#if SHARK_LLVM_VERSION >= 27
+#ifndef NDEBUG
+ if (!fnmatch(SharkPrintAsmOf, name, 0)) {
+ llvm::SetCurrentDebugType(X86_ONLY("x86-emitter") NOT_X86("jit"));
+ llvm::DebugFlag = true;
+ }
+ else {
+ llvm::SetCurrentDebugType("");
+ llvm::DebugFlag = false;
+ }
+#endif // !NDEBUG
+#else
+ // NB you need to patch LLVM with http://tinyurl.com/yf3baln for this
+ std::vector<const char*> args;
+ args.push_back(""); // program name
+ if (!fnmatch(SharkPrintAsmOf, name, 0))
+ args.push_back("-debug-only=x86-emitter");
+ else
+ args.push_back("-debug-only=none");
+ args.push_back(0); // terminator
+ cl::ParseCommandLineOptions(args.size() - 1, (char **) &args[0]);
+#endif // SHARK_LLVM_VERSION
+ }
+ memory_manager()->set_entry_for_function(function, entry);
+ code = (address) execution_engine()->getPointerToFunction(function);
+ }
+ entry->set_entry_point(code);
+ entry->set_function(function);
+ entry->set_context(context());
+ address code_start = entry->code_start();
+ address code_limit = entry->code_limit();
+
+ // Register generated code for profiling, etc
+ if (JvmtiExport::should_post_dynamic_code_generated())
+ JvmtiExport::post_dynamic_code_generated(name, code_start, code_limit);
+
+ // Print debug information, if requested
+ if (SharkTraceInstalls) {
+ tty->print_cr(
+ " [%p-%p): %s (%d bytes code)",
+ code_start, code_limit, name, code_limit - code_start);
+ }
+}
+
+void SharkCompiler::free_compiled_method(address code) {
+ // This method may only be called when the VM is at a safepoint.
+ // All _thread_in_vm threads will be waiting for the safepoint to
+ // finish with the exception of the VM thread, so we can consider
+ // ourself the owner of the execution engine lock even though we
+ // can't actually acquire it at this time.
+ assert(Thread::current()->is_VM_thread(), "must be called by VM thread");
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
+ SharkEntry *entry = (SharkEntry *) code;
+ entry->context()->push_to_free_queue(entry->function());
+}
+
+void SharkCompiler::free_queued_methods() {
+ // The free queue is protected by the execution engine lock
+ assert(execution_engine_lock()->owned_by_self(), "should be");
+
+ while (true) {
+ Function *function = context()->pop_from_free_queue();
+ if (function == NULL)
+ break;
+
+ execution_engine()->freeMachineCodeForFunction(function);
+ function->eraseFromParent();
+ }
+}
+
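+// For example (illustrative): methodname("java/lang/String", "hashCode")
+// yields "java.lang.String::hashCode".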
+const char* SharkCompiler::methodname(const char* klass, const char* method) {
+ char *buf = NEW_RESOURCE_ARRAY(char, strlen(klass) + 2 + strlen(method) + 1);
+
+ char *dst = buf;
+ for (const char *c = klass; *c; c++) {
+ if (*c == '/')
+ *(dst++) = '.';
+ else
+ *(dst++) = *c;
+ }
+ *(dst++) = ':';
+ *(dst++) = ':';
+ for (const char *c = method; *c; c++) {
+ *(dst++) = *c;
+ }
+ *(dst++) = '\0';
+ return buf;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkCompiler.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkContext;
+
+class SharkCompiler : public AbstractCompiler {
+ public:
+ // Creation
+ SharkCompiler();
+
+ // Name of this compiler
+ const char *name() { return "shark"; }
+
+ // Missing feature tests
+ bool supports_native() { return true; }
+ bool supports_osr() { return true; }
+
+ // Customization
+ bool needs_adapters() { return false; }
+ bool needs_stubs() { return false; }
+
+ // Initialization
+ void initialize();
+
+ // Compile a normal (bytecode) method and install it in the VM
+ void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
+
+ // Generate a wrapper for a native (JNI) method
+ nmethod* generate_native_wrapper(MacroAssembler* masm,
+ methodHandle target,
+ BasicType* arg_types,
+ BasicType return_type);
+
+ // Free compiled methods (and native wrappers)
+ void free_compiled_method(address code);
+
+ // Each thread generating IR needs its own context. The normal
+ // context is used for bytecode methods, and is protected from
+ // multiple simultaneous accesses by being restricted to the
+ // compiler thread. The native context is used for JNI methods,
+ // and is protected from multiple simultaneous accesses by the
+ // adapter handler library lock.
+ private:
+ SharkContext* _normal_context;
+ SharkContext* _native_context;
+
+ public:
+ SharkContext* context() const {
+ if (JavaThread::current()->is_Compiler_thread()) {
+ return _normal_context;
+ }
+ else {
+ assert(AdapterHandlerLibrary_lock->owned_by_self(), "should be");
+ return _native_context;
+ }
+ }
+
+ // The LLVM execution engine is the JIT we use to generate native
+ // code. It is thread safe, but we need to protect it with a lock
+ // of our own because otherwise LLVM's lock and HotSpot's locks
+ // interleave and deadlock. The SharkMemoryManager is not thread
+ // safe, and is protected by the same lock as the execution engine.
+ private:
+ Monitor* _execution_engine_lock;
+ SharkMemoryManager* _memory_manager;
+ llvm::ExecutionEngine* _execution_engine;
+
+ private:
+ Monitor* execution_engine_lock() const {
+ return _execution_engine_lock;
+ }
+ SharkMemoryManager* memory_manager() const {
+ assert(execution_engine_lock()->owned_by_self(), "should be");
+ return _memory_manager;
+ }
+ llvm::ExecutionEngine* execution_engine() const {
+ assert(execution_engine_lock()->owned_by_self(), "should be");
+ return _execution_engine;
+ }
+
+ // Global access
+ public:
+ static SharkCompiler* compiler() {
+ AbstractCompiler *compiler =
+ CompileBroker::compiler(CompLevel_fast_compile);
+ assert(compiler->is_shark() && compiler->is_initialized(), "should be");
+ return (SharkCompiler *) compiler;
+ }
+
+ // Helpers
+ private:
+ static const char* methodname(const char* klass, const char* method);
+ void generate_native_code(SharkEntry* entry,
+ llvm::Function* function,
+ const char* name);
+ void free_queued_methods();
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkConstant.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkConstant.cpp.incl"
+
+using namespace llvm;
+
+SharkConstant* SharkConstant::for_ldc(ciBytecodeStream *iter) {
+ ciConstant constant = iter->get_constant();
+ ciType *type = NULL;
+ if (constant.basic_type() == T_OBJECT) {
+ ciEnv *env = ciEnv::current();
+ if (constant.as_object()->is_klass())
+ type = env->Class_klass();
+ else
+ type = env->String_klass();
+ }
+ return new SharkConstant(constant, type);
+}
+
+SharkConstant* SharkConstant::for_field(ciBytecodeStream *iter) {
+ bool will_link;
+ ciField *field = iter->get_field(will_link);
+ assert(will_link, "typeflow responsibility");
+
+ return new SharkConstant(field->constant_value(), field->type());
+}
+
+SharkConstant::SharkConstant(ciConstant constant, ciType *type) {
+ SharkValue *value = NULL;
+
+ switch (constant.basic_type()) {
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_CHAR:
+ case T_SHORT:
+ case T_INT:
+ value = SharkValue::jint_constant(constant.as_int());
+ break;
+
+ case T_LONG:
+ value = SharkValue::jlong_constant(constant.as_long());
+ break;
+
+ case T_FLOAT:
+ value = SharkValue::jfloat_constant(constant.as_float());
+ break;
+
+ case T_DOUBLE:
+ value = SharkValue::jdouble_constant(constant.as_double());
+ break;
+
+ case T_OBJECT:
+ case T_ARRAY:
+ break;
+
+ case T_ILLEGAL:
+ // out of memory
+ _is_loaded = false;
+ return;
+
+ default:
+ tty->print_cr("Unhandled type %s", type2name(constant.basic_type()));
+ ShouldNotReachHere();
+ }
+
+ // Handle primitive types. We create SharkValues for these
+ // now; doing so doesn't emit any code, and it allows us to
+ // delegate a bunch of stuff to the SharkValue code.
+ if (value) {
+ _value = value;
+ _is_loaded = true;
+ _is_nonzero = value->zero_checked();
+ _is_two_word = value->is_two_word();
+ return;
+ }
+
+ // Handle reference types. This is tricky because some
+  // ciObjects are pseudo-objects that refer to oops which
+ // have yet to be created. We need to spot the unloaded
+ // objects (which differ between ldc* and get*, thanks!)
+ ciObject *object = constant.as_object();
+ assert(type != NULL, "shouldn't be");
+ if (object->is_klass()) {
+ // The constant returned for a klass is the ciKlass
+ // for the entry, but we want the java_mirror.
+ ciKlass *klass = object->as_klass();
+ if (!klass->is_loaded()) {
+ _is_loaded = false;
+ return;
+ }
+ object = klass->java_mirror();
+ }
+ if (object->is_null_object() || !object->can_be_constant()) {
+ _is_loaded = false;
+ return;
+ }
+
+ _value = NULL;
+ _object = object;
+ _type = type;
+ _is_loaded = true;
+ _is_nonzero = true;
+ _is_two_word = false;
+}
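+
+// For illustration only: the inlinability scan in sharkInliner.cpp (also
+// part of this change) uses SharkConstant without emitting any IR,
+// along these lines:
+//
+//   SharkConstant *sc = SharkConstant::for_ldc(iter());
+//   if (!sc->is_loaded())
+//     return false;             // bail out: constant not resolved yet
+//   push(sc->is_nonzero());     // track whether a null check is needed
+//   if (sc->is_two_word())
+//     push(false);              // second stack slot for long/double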
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkConstant.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkConstant : public ResourceObj {
+ public:
+ static SharkConstant* for_ldc(ciBytecodeStream* iter);
+ static SharkConstant* for_field(ciBytecodeStream* iter);
+
+ private:
+ SharkConstant(ciConstant constant, ciType* type);
+
+ private:
+ SharkValue* _value;
+ ciObject* _object;
+ ciType* _type;
+ bool _is_loaded;
+ bool _is_nonzero;
+ bool _is_two_word;
+
+ public:
+ bool is_loaded() const {
+ return _is_loaded;
+ }
+ bool is_nonzero() const {
+ assert(is_loaded(), "should be");
+ return _is_nonzero;
+ }
+ bool is_two_word() const {
+ assert(is_loaded(), "should be");
+ return _is_two_word;
+ }
+
+ public:
+ SharkValue* value(SharkBuilder* builder) {
+ assert(is_loaded(), "should be");
+ if (_value == NULL) {
+ _value = SharkValue::create_generic(
+ _type, builder->CreateInlineOop(_object), _is_nonzero);
+ }
+ return _value;
+ }
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkContext.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkContext.cpp.incl"
+
+using namespace llvm;
+
+SharkContext::SharkContext(const char* name)
+ : LLVMContext(),
+ _free_queue(NULL) {
+ // Create a module to build our functions into
+ _module = new Module(name, *this);
+
+ // Create basic types
+ _void_type = Type::getVoidTy(*this);
+ _bit_type = Type::getInt1Ty(*this);
+ _jbyte_type = Type::getInt8Ty(*this);
+ _jshort_type = Type::getInt16Ty(*this);
+ _jint_type = Type::getInt32Ty(*this);
+ _jlong_type = Type::getInt64Ty(*this);
+ _jfloat_type = Type::getFloatTy(*this);
+ _jdouble_type = Type::getDoubleTy(*this);
+
+ // Create compound types
+ _itableOffsetEntry_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), itableOffsetEntry::size() * wordSize));
+
+ _klass_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(Klass)));
+
+ _jniEnv_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(JNIEnv)));
+
+ _jniHandleBlock_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(JNIHandleBlock)));
+
+ _methodOop_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(methodOopDesc)));
+
+ _monitor_type = ArrayType::get(
+ jbyte_type(), frame::interpreter_frame_monitor_size() * wordSize);
+
+ _oop_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(oopDesc)));
+
+ _thread_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(JavaThread)));
+
+ _zeroStack_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(ZeroStack)));
+
+ std::vector<const Type*> params;
+ params.push_back(methodOop_type());
+ params.push_back(intptr_type());
+ params.push_back(thread_type());
+ _entry_point_type = FunctionType::get(jint_type(), params, false);
+
+ params.clear();
+ params.push_back(methodOop_type());
+ params.push_back(PointerType::getUnqual(jbyte_type()));
+ params.push_back(intptr_type());
+ params.push_back(thread_type());
+ _osr_entry_point_type = FunctionType::get(jint_type(), params, false);
+
+ // Create mappings
+ for (int i = 0; i < T_CONFLICT; i++) {
+ switch (i) {
+ case T_BOOLEAN:
+ _to_stackType[i] = jint_type();
+ _to_arrayType[i] = jbyte_type();
+ break;
+
+ case T_BYTE:
+ _to_stackType[i] = jint_type();
+ _to_arrayType[i] = jbyte_type();
+ break;
+
+ case T_CHAR:
+ _to_stackType[i] = jint_type();
+ _to_arrayType[i] = jshort_type();
+ break;
+
+ case T_SHORT:
+ _to_stackType[i] = jint_type();
+ _to_arrayType[i] = jshort_type();
+ break;
+
+ case T_INT:
+ _to_stackType[i] = jint_type();
+ _to_arrayType[i] = jint_type();
+ break;
+
+ case T_LONG:
+ _to_stackType[i] = jlong_type();
+ _to_arrayType[i] = jlong_type();
+ break;
+
+ case T_FLOAT:
+ _to_stackType[i] = jfloat_type();
+ _to_arrayType[i] = jfloat_type();
+ break;
+
+ case T_DOUBLE:
+ _to_stackType[i] = jdouble_type();
+ _to_arrayType[i] = jdouble_type();
+ break;
+
+ case T_OBJECT:
+ case T_ARRAY:
+ _to_stackType[i] = oop_type();
+ _to_arrayType[i] = oop_type();
+ break;
+
+ case T_ADDRESS:
+ _to_stackType[i] = intptr_type();
+ _to_arrayType[i] = NULL;
+ break;
+
+ default:
+ _to_stackType[i] = NULL;
+ _to_arrayType[i] = NULL;
+ }
+ }
+}
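+
+// For illustration only: the two tables filled in above back the
+// to_stackType() and to_arrayType() accessors in sharkContext.hpp.
+// Sub-int types are widened on the Java expression stack but stay
+// narrow as array elements, so, roughly:
+//
+//   SharkContext& ctx = SharkContext::current();
+//   const Type* slot = ctx.to_stackType(T_BOOLEAN);  // == jint_type()
+//   const Type* elem = ctx.to_arrayType(T_BOOLEAN);  // == jbyte_type()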
+
+class SharkFreeQueueItem : public CHeapObj {
+ public:
+ SharkFreeQueueItem(llvm::Function* function, SharkFreeQueueItem *next)
+ : _function(function), _next(next) {}
+
+ private:
+ llvm::Function* _function;
+ SharkFreeQueueItem* _next;
+
+ public:
+ llvm::Function* function() const {
+ return _function;
+ }
+ SharkFreeQueueItem* next() const {
+ return _next;
+ }
+};
+
+void SharkContext::push_to_free_queue(Function* function) {
+ _free_queue = new SharkFreeQueueItem(function, _free_queue);
+}
+
+Function* SharkContext::pop_from_free_queue() {
+ if (_free_queue == NULL)
+ return NULL;
+
+ SharkFreeQueueItem *item = _free_queue;
+ Function *function = item->function();
+ _free_queue = item->next();
+ delete item;
+ return function;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkContext.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// The LLVMContext class allows multiple instances of LLVM to operate
+// independently of each other in a multithreaded context. We extend
+// this here to store things in Shark that are LLVMContext-specific.
+
+class SharkFreeQueueItem;
+
+class SharkContext : public llvm::LLVMContext {
+ public:
+ SharkContext(const char* name);
+
+ private:
+ llvm::Module* _module;
+
+#if SHARK_LLVM_VERSION >= 27
+ public:
+#else
+ private:
+#endif
+ llvm::Module* module() const {
+ return _module;
+ }
+
+ // Get this thread's SharkContext
+ public:
+ static SharkContext& current() {
+ return *SharkCompiler::compiler()->context();
+ }
+
+ // Module accessors
+ public:
+#if SHARK_LLVM_VERSION < 27
+ llvm::ModuleProvider* module_provider() const {
+ return new llvm::ExistingModuleProvider(module());
+ }
+#endif
+ void add_function(llvm::Function* function) const {
+ module()->getFunctionList().push_back(function);
+ }
+ llvm::Constant* get_external(const char* name,
+ const llvm::FunctionType* sig) {
+ return module()->getOrInsertFunction(name, sig);
+ }
+
+ // Basic types
+ private:
+ const llvm::Type* _void_type;
+ const llvm::IntegerType* _bit_type;
+ const llvm::IntegerType* _jbyte_type;
+ const llvm::IntegerType* _jshort_type;
+ const llvm::IntegerType* _jint_type;
+ const llvm::IntegerType* _jlong_type;
+ const llvm::Type* _jfloat_type;
+ const llvm::Type* _jdouble_type;
+
+ public:
+ const llvm::Type* void_type() const {
+ return _void_type;
+ }
+ const llvm::IntegerType* bit_type() const {
+ return _bit_type;
+ }
+ const llvm::IntegerType* jbyte_type() const {
+ return _jbyte_type;
+ }
+ const llvm::IntegerType* jshort_type() const {
+ return _jshort_type;
+ }
+ const llvm::IntegerType* jint_type() const {
+ return _jint_type;
+ }
+ const llvm::IntegerType* jlong_type() const {
+ return _jlong_type;
+ }
+ const llvm::Type* jfloat_type() const {
+ return _jfloat_type;
+ }
+ const llvm::Type* jdouble_type() const {
+ return _jdouble_type;
+ }
+ const llvm::IntegerType* intptr_type() const {
+ return LP64_ONLY(jlong_type()) NOT_LP64(jint_type());
+ }
+
+ // Compound types
+ private:
+ const llvm::PointerType* _itableOffsetEntry_type;
+ const llvm::PointerType* _jniEnv_type;
+ const llvm::PointerType* _jniHandleBlock_type;
+ const llvm::PointerType* _klass_type;
+ const llvm::PointerType* _methodOop_type;
+ const llvm::ArrayType* _monitor_type;
+ const llvm::PointerType* _oop_type;
+ const llvm::PointerType* _thread_type;
+ const llvm::PointerType* _zeroStack_type;
+ const llvm::FunctionType* _entry_point_type;
+ const llvm::FunctionType* _osr_entry_point_type;
+
+ public:
+ const llvm::PointerType* itableOffsetEntry_type() const {
+ return _itableOffsetEntry_type;
+ }
+ const llvm::PointerType* jniEnv_type() const {
+ return _jniEnv_type;
+ }
+ const llvm::PointerType* jniHandleBlock_type() const {
+ return _jniHandleBlock_type;
+ }
+ const llvm::PointerType* klass_type() const {
+ return _klass_type;
+ }
+ const llvm::PointerType* methodOop_type() const {
+ return _methodOop_type;
+ }
+ const llvm::ArrayType* monitor_type() const {
+ return _monitor_type;
+ }
+ const llvm::PointerType* oop_type() const {
+ return _oop_type;
+ }
+ const llvm::PointerType* thread_type() const {
+ return _thread_type;
+ }
+ const llvm::PointerType* zeroStack_type() const {
+ return _zeroStack_type;
+ }
+ const llvm::FunctionType* entry_point_type() const {
+ return _entry_point_type;
+ }
+ const llvm::FunctionType* osr_entry_point_type() const {
+ return _osr_entry_point_type;
+ }
+
+ // Mappings
+ private:
+ const llvm::Type* _to_stackType[T_CONFLICT];
+ const llvm::Type* _to_arrayType[T_CONFLICT];
+
+ private:
+ const llvm::Type* map_type(const llvm::Type* const* table,
+ BasicType type) const {
+ assert(type >= 0 && type < T_CONFLICT, "unhandled type");
+ const llvm::Type* result = table[type];
+ assert(result != NULL, "unhandled type");
+ return result;
+ }
+
+ public:
+ const llvm::Type* to_stackType(BasicType type) const {
+ return map_type(_to_stackType, type);
+ }
+ const llvm::Type* to_arrayType(BasicType type) const {
+ return map_type(_to_arrayType, type);
+ }
+
+ // Functions queued for freeing
+ private:
+ SharkFreeQueueItem* _free_queue;
+
+ public:
+ void push_to_free_queue(llvm::Function* function);
+ llvm::Function* pop_from_free_queue();
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkEntry.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkContext;
+
+class SharkEntry : public ZeroEntry {
+ private:
+ address _code_limit;
+ SharkContext* _context;
+ llvm::Function* _function;
+
+ public:
+ address code_start() const {
+ return entry_point();
+ }
+ address code_limit() const {
+ return _code_limit;
+ }
+ SharkContext* context() const {
+ return _context;
+ }
+ llvm::Function* function() const {
+ return _function;
+ }
+
+ public:
+ void set_code_limit(address code_limit) {
+ _code_limit = code_limit;
+ }
+ void set_context(SharkContext* context) {
+ _context = context;
+ }
+ void set_function(llvm::Function* function) {
+ _function = function;
+ }
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkFunction.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkFunction.cpp.incl"
+
+using namespace llvm;
+
+void SharkFunction::initialize(const char *name) {
+ // Create the function
+ _function = Function::Create(
+ entry_point_type(),
+ GlobalVariable::InternalLinkage,
+ name);
+
+ // Get our arguments
+ Function::arg_iterator ai = function()->arg_begin();
+ Argument *method = ai++;
+ method->setName("method");
+ Argument *osr_buf = NULL;
+ if (is_osr()) {
+ osr_buf = ai++;
+ osr_buf->setName("osr_buf");
+ }
+ Argument *base_pc = ai++;
+ base_pc->setName("base_pc");
+ code_buffer()->set_base_pc(base_pc);
+ Argument *thread = ai++;
+ thread->setName("thread");
+ set_thread(thread);
+
+ // Create the list of blocks
+ set_block_insertion_point(NULL);
+ _blocks = NEW_RESOURCE_ARRAY(SharkTopLevelBlock*, block_count());
+ for (int i = 0; i < block_count(); i++) {
+ ciTypeFlow::Block *b = flow()->pre_order_at(i);
+
+ // Work around a bug whereby pre_order_at() does not return
+ // the correct pre-ordering. If pre_order_at() were correct
+ // this line could simply be:
+ // _blocks[i] = new SharkTopLevelBlock(this, b);
+ _blocks[b->pre_order()] = new SharkTopLevelBlock(this, b);
+ }
+
+ // Walk the tree from the start block to determine which
+ // blocks are entered and which blocks require phis
+ SharkTopLevelBlock *start_block = block(flow()->start_block_num());
+ assert(start_block->start() == flow()->start_bci(), "blocks out of order");
+ start_block->enter();
+
+ // Initialize all entered blocks
+ for (int i = 0; i < block_count(); i++) {
+ if (block(i)->entered())
+ block(i)->initialize();
+ }
+
+ // Create and push our stack frame
+ set_block_insertion_point(&function()->front());
+ builder()->SetInsertPoint(CreateBlock());
+ _stack = SharkStack::CreateBuildAndPushFrame(this, method);
+
+ // Create the entry state
+ SharkState *entry_state;
+ if (is_osr()) {
+ entry_state = new SharkOSREntryState(start_block, method, osr_buf);
+
+ // Free the OSR buffer
+ builder()->CreateCall(builder()->osr_migration_end(), osr_buf);
+ }
+ else {
+ entry_state = new SharkNormalEntryState(start_block, method);
+
+ // Lock if necessary
+ if (is_synchronized()) {
+ SharkTopLevelBlock *locker =
+ new SharkTopLevelBlock(this, start_block->ciblock());
+ locker->add_incoming(entry_state);
+
+ set_block_insertion_point(start_block->entry_block());
+ locker->acquire_method_lock();
+
+ entry_state = locker->current_state();
+ }
+ }
+
+ // Transition into the method proper
+ start_block->add_incoming(entry_state);
+ builder()->CreateBr(start_block->entry_block());
+
+ // Parse the blocks
+ for (int i = 0; i < block_count(); i++) {
+ if (!block(i)->entered())
+ continue;
+
+ if (i + 1 < block_count())
+ set_block_insertion_point(block(i + 1)->entry_block());
+ else
+ set_block_insertion_point(NULL);
+
+ block(i)->emit_IR();
+ }
+ do_deferred_zero_checks();
+}
+
+class DeferredZeroCheck : public SharkTargetInvariants {
+ public:
+ DeferredZeroCheck(SharkTopLevelBlock* block, SharkValue* value)
+ : SharkTargetInvariants(block),
+ _block(block),
+ _value(value),
+ _bci(block->bci()),
+ _state(block->current_state()->copy()),
+ _check_block(builder()->GetInsertBlock()),
+ _continue_block(function()->CreateBlock("not_zero")) {
+ builder()->SetInsertPoint(continue_block());
+ }
+
+ private:
+ SharkTopLevelBlock* _block;
+ SharkValue* _value;
+ int _bci;
+ SharkState* _state;
+ BasicBlock* _check_block;
+ BasicBlock* _continue_block;
+
+ public:
+ SharkTopLevelBlock* block() const {
+ return _block;
+ }
+ SharkValue* value() const {
+ return _value;
+ }
+ int bci() const {
+ return _bci;
+ }
+ SharkState* state() const {
+ return _state;
+ }
+ BasicBlock* check_block() const {
+ return _check_block;
+ }
+ BasicBlock* continue_block() const {
+ return _continue_block;
+ }
+
+ public:
+ SharkFunction* function() const {
+ return block()->function();
+ }
+
+ public:
+ void process() const {
+ builder()->SetInsertPoint(check_block());
+ block()->do_deferred_zero_check(value(), bci(), state(), continue_block());
+ }
+};
+
+void SharkFunction::add_deferred_zero_check(SharkTopLevelBlock* block,
+ SharkValue* value) {
+ deferred_zero_checks()->append(new DeferredZeroCheck(block, value));
+}
+
+void SharkFunction::do_deferred_zero_checks() {
+ for (int i = 0; i < deferred_zero_checks()->length(); i++)
+ deferred_zero_checks()->at(i)->process();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkFunction.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkTopLevelBlock;
+class DeferredZeroCheck;
+
+class SharkFunction : public SharkTargetInvariants {
+ friend class SharkStackWithNormalFrame;
+
+ public:
+ static llvm::Function* build(ciEnv* env,
+ SharkBuilder* builder,
+ ciTypeFlow* flow,
+ const char* name) {
+ SharkFunction function(env, builder, flow, name);
+ return function.function();
+ }
+
+ private:
+ SharkFunction(ciEnv* env,
+ SharkBuilder* builder,
+ ciTypeFlow* flow,
+ const char* name)
+ : SharkTargetInvariants(env, builder, flow) { initialize(name); }
+
+ private:
+ void initialize(const char* name);
+
+ private:
+ llvm::Function* _function;
+ SharkTopLevelBlock** _blocks;
+ GrowableArray<DeferredZeroCheck*> _deferred_zero_checks;
+ SharkStack* _stack;
+
+ public:
+ llvm::Function* function() const {
+ return _function;
+ }
+ int block_count() const {
+ return flow()->block_count();
+ }
+ SharkTopLevelBlock* block(int i) const {
+ assert(i < block_count(), "should be");
+ return _blocks[i];
+ }
+ GrowableArray<DeferredZeroCheck*>* deferred_zero_checks() {
+ return &_deferred_zero_checks;
+ }
+ SharkStack* stack() const {
+ return _stack;
+ }
+
+ // On-stack replacement
+ private:
+ bool is_osr() const {
+ return flow()->is_osr_flow();
+ }
+ const llvm::FunctionType* entry_point_type() const {
+ if (is_osr())
+ return SharkType::osr_entry_point_type();
+ else
+ return SharkType::entry_point_type();
+ }
+
+ // Block management
+ private:
+ llvm::BasicBlock* _block_insertion_point;
+
+ void set_block_insertion_point(llvm::BasicBlock* block_insertion_point) {
+ _block_insertion_point = block_insertion_point;
+ }
+ llvm::BasicBlock* block_insertion_point() const {
+ return _block_insertion_point;
+ }
+
+ public:
+ llvm::BasicBlock* CreateBlock(const char* name = "") const {
+ return llvm::BasicBlock::Create(
+ SharkContext::current(), name, function(), block_insertion_point());
+ }
+
+ // Deferred zero checks
+ public:
+ void add_deferred_zero_check(SharkTopLevelBlock* block,
+ SharkValue* value);
+
+ private:
+ void do_deferred_zero_checks();
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkInliner.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,749 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkInliner.cpp.incl"
+
+using namespace llvm;
+
+class SharkInlineBlock : public SharkBlock {
+ public:
+ SharkInlineBlock(ciMethod* target, SharkState* state)
+ : SharkBlock(state, target),
+ _outer_state(state),
+ _entry_state(new SharkState(this)) {
+ for (int i = target->max_locals() - 1; i >= 0; i--) {
+ SharkValue *value = NULL;
+ if (i < target->arg_size())
+ value = outer_state()->pop();
+ entry_state()->set_local(i, value);
+ }
+ }
+
+ private:
+ SharkState* _outer_state;
+ SharkState* _entry_state;
+
+ private:
+ SharkState* outer_state() {
+ return _outer_state;
+ }
+ SharkState* entry_state() {
+ return _entry_state;
+ }
+
+ public:
+ void emit_IR() {
+ parse_bytecode(0, target()->code_size());
+ }
+
+ private:
+ void do_return(BasicType type) {
+ if (type != T_VOID) {
+ SharkValue *result = pop_result(type);
+ outer_state()->push(result);
+ if (result->is_two_word())
+ outer_state()->push(NULL);
+ }
+ }
+};
+
+class SharkInlinerHelper : public StackObj {
+ public:
+ SharkInlinerHelper(ciMethod* target, SharkState* entry_state)
+ : _target(target),
+ _entry_state(entry_state),
+ _iter(target) {}
+
+ private:
+ ciBytecodeStream _iter;
+ SharkState* _entry_state;
+ ciMethod* _target;
+
+ public:
+ ciBytecodeStream* iter() {
+ return &_iter;
+ }
+ SharkState* entry_state() const {
+ return _entry_state;
+ }
+ ciMethod* target() const {
+ return _target;
+ }
+
+ public:
+ Bytecodes::Code bc() {
+ return iter()->cur_bc();
+ }
+ int max_locals() const {
+ return target()->max_locals();
+ }
+ int max_stack() const {
+ return target()->max_stack();
+ }
+
+ // Inlinability check
+ public:
+ bool is_inlinable();
+
+ private:
+ void initialize_for_check();
+
+ bool do_getstatic() {
+ return do_field_access(true, false);
+ }
+ bool do_getfield() {
+ return do_field_access(true, true);
+ }
+ bool do_putfield() {
+ return do_field_access(false, true);
+ }
+ bool do_field_access(bool is_get, bool is_field);
+
+ // Local variables for inlinability check
+ private:
+ bool* _locals;
+
+ public:
+ bool* local_addr(int index) const {
+ assert(index >= 0 && index < max_locals(), "bad local variable index");
+ return &_locals[index];
+ }
+ bool local(int index) const {
+ return *local_addr(index);
+ }
+ void set_local(int index, bool value) {
+ *local_addr(index) = value;
+ }
+
+ // Expression stack for inlinability check
+ private:
+ bool* _stack;
+ bool* _sp;
+
+ public:
+ int stack_depth() const {
+ return _sp - _stack;
+ }
+ bool* stack_addr(int slot) const {
+ assert(slot >= 0 && slot < stack_depth(), "bad stack slot");
+ return &_sp[-(slot + 1)];
+ }
+ void push(bool value) {
+ assert(stack_depth() < max_stack(), "stack overrun");
+ *(_sp++) = value;
+ }
+ bool pop() {
+ assert(stack_depth() > 0, "stack underrun");
+ return *(--_sp);
+ }
+
+ // Methods for two-word locals
+ public:
+ void push_pair_local(int index) {
+ push(local(index));
+ push(local(index + 1));
+ }
+ void pop_pair_local(int index) {
+ set_local(index + 1, pop());
+ set_local(index, pop());
+ }
+
+ // Code generation
+ public:
+ void do_inline() {
+ (new SharkInlineBlock(target(), entry_state()))->emit_IR();
+ }
+};
+
+// Quick checks so we can bail out before doing too much
+bool SharkInliner::may_be_inlinable(ciMethod *target) {
+ // We can't inline native methods
+ if (target->is_native())
+ return false;
+
+ // Not much point inlining abstract ones, and in any
+ // case we'd need a stack frame to throw the exception
+ if (target->is_abstract())
+ return false;
+
+ // Don't inline anything huge
+ if (target->code_size() > SharkMaxInlineSize)
+ return false;
+
+ // Monitors aren't allowed without a frame to put them in
+ if (target->is_synchronized() || target->has_monitor_bytecodes())
+ return false;
+
+ // We don't do control flow
+ if (target->has_exception_handlers() || target->has_jsrs())
+ return false;
+
+ // Don't try to inline constructors, as they must
+ // eventually call Object.<init> which we can't inline.
+ // Note that this catches <clinit> too, but why would
+ // we be compiling that?
+ if (target->is_initializer())
+ return false;
+
+ // Mustn't inline Object.<init>
+ // Should be caught by the above, but just in case...
+ if (target->intrinsic_id() == vmIntrinsics::_Object_init)
+ return false;
+
+ return true;
+}
+
+// Full-on detailed check, for methods that pass the quick checks.
+// Inlined methods have no stack frame, so we can't do anything
+// that would require one. This means no safepoints (and hence
+// no loops) and no VM calls. No VM calls means, amongst other
+// things, that no exceptions can be created, which means no null
+// checks or divide-by-zero checks are allowed. The lack of null
+// checks in particular would eliminate practically everything,
+// but we can get around that restriction by relying on the zero-
+// check eliminator to strip the checks. To do that, we need to
+// walk through the method, tracking which values are and are not
+// zero-checked.
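+//
+// As a minimal illustration, a trivial getter such as
+//
+//   aload_0            // receiver; marked "known nonzero" up front if the
+//                      // corresponding argument was already zero-checked
+//   getfield <field>   // do_getfield() pops that flag; a "false" here
+//                      // would mean a null check is needed, so we bail
+//   areturn
+//
+// passes the scan, whereas an idiv whose divisor is not known to be
+// nonzero causes the whole inline attempt to be abandoned.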
+bool SharkInlinerHelper::is_inlinable() {
+ ResourceMark rm;
+ initialize_for_check();
+
+ SharkConstant *sc;
+ bool a, b, c, d;
+
+ iter()->reset_to_bci(0);
+ while (iter()->next() != ciBytecodeStream::EOBC()) {
+ switch (bc()) {
+ case Bytecodes::_nop:
+ break;
+
+ case Bytecodes::_aconst_null:
+ push(false);
+ break;
+
+ case Bytecodes::_iconst_0:
+ push(false);
+ break;
+ case Bytecodes::_iconst_m1:
+ case Bytecodes::_iconst_1:
+ case Bytecodes::_iconst_2:
+ case Bytecodes::_iconst_3:
+ case Bytecodes::_iconst_4:
+ case Bytecodes::_iconst_5:
+ push(true);
+ break;
+
+ case Bytecodes::_lconst_0:
+ push(false);
+ push(false);
+ break;
+ case Bytecodes::_lconst_1:
+ push(true);
+ push(false);
+ break;
+
+ case Bytecodes::_fconst_0:
+ case Bytecodes::_fconst_1:
+ case Bytecodes::_fconst_2:
+ push(false);
+ break;
+
+ case Bytecodes::_dconst_0:
+ case Bytecodes::_dconst_1:
+ push(false);
+ push(false);
+ break;
+
+ case Bytecodes::_bipush:
+ push(iter()->get_constant_u1() != 0);
+ break;
+ case Bytecodes::_sipush:
+ push(iter()->get_constant_u2() != 0);
+ break;
+
+ case Bytecodes::_ldc:
+ case Bytecodes::_ldc_w:
+ case Bytecodes::_ldc2_w:
+ sc = SharkConstant::for_ldc(iter());
+ if (!sc->is_loaded())
+ return false;
+ push(sc->is_nonzero());
+ if (sc->is_two_word())
+ push(false);
+ break;
+
+ case Bytecodes::_iload_0:
+ case Bytecodes::_fload_0:
+ case Bytecodes::_aload_0:
+ push(local(0));
+ break;
+ case Bytecodes::_lload_0:
+ case Bytecodes::_dload_0:
+ push_pair_local(0);
+ break;
+
+ case Bytecodes::_iload_1:
+ case Bytecodes::_fload_1:
+ case Bytecodes::_aload_1:
+ push(local(1));
+ break;
+ case Bytecodes::_lload_1:
+ case Bytecodes::_dload_1:
+ push_pair_local(1);
+ break;
+
+ case Bytecodes::_iload_2:
+ case Bytecodes::_fload_2:
+ case Bytecodes::_aload_2:
+ push(local(2));
+ break;
+ case Bytecodes::_lload_2:
+ case Bytecodes::_dload_2:
+ push_pair_local(2);
+ break;
+
+ case Bytecodes::_iload_3:
+ case Bytecodes::_fload_3:
+ case Bytecodes::_aload_3:
+ push(local(3));
+ break;
+ case Bytecodes::_lload_3:
+ case Bytecodes::_dload_3:
+ push_pair_local(3);
+ break;
+
+ case Bytecodes::_iload:
+ case Bytecodes::_fload:
+ case Bytecodes::_aload:
+ push(local(iter()->get_index()));
+ break;
+ case Bytecodes::_lload:
+ case Bytecodes::_dload:
+ push_pair_local(iter()->get_index());
+ break;
+
+ case Bytecodes::_istore_0:
+ case Bytecodes::_fstore_0:
+ case Bytecodes::_astore_0:
+ set_local(0, pop());
+ break;
+ case Bytecodes::_lstore_0:
+ case Bytecodes::_dstore_0:
+ pop_pair_local(0);
+ break;
+
+ case Bytecodes::_istore_1:
+ case Bytecodes::_fstore_1:
+ case Bytecodes::_astore_1:
+ set_local(1, pop());
+ break;
+ case Bytecodes::_lstore_1:
+ case Bytecodes::_dstore_1:
+ pop_pair_local(1);
+ break;
+
+ case Bytecodes::_istore_2:
+ case Bytecodes::_fstore_2:
+ case Bytecodes::_astore_2:
+ set_local(2, pop());
+ break;
+ case Bytecodes::_lstore_2:
+ case Bytecodes::_dstore_2:
+ pop_pair_local(2);
+ break;
+
+ case Bytecodes::_istore_3:
+ case Bytecodes::_fstore_3:
+ case Bytecodes::_astore_3:
+ set_local(3, pop());
+ break;
+ case Bytecodes::_lstore_3:
+ case Bytecodes::_dstore_3:
+ pop_pair_local(3);
+ break;
+
+ case Bytecodes::_istore:
+ case Bytecodes::_fstore:
+ case Bytecodes::_astore:
+ set_local(iter()->get_index(), pop());
+ break;
+ case Bytecodes::_lstore:
+ case Bytecodes::_dstore:
+ pop_pair_local(iter()->get_index());
+ break;
+
+ case Bytecodes::_pop:
+ pop();
+ break;
+ case Bytecodes::_pop2:
+ pop();
+ pop();
+ break;
+ case Bytecodes::_swap:
+ a = pop();
+ b = pop();
+ push(a);
+ push(b);
+ break;
+ case Bytecodes::_dup:
+ a = pop();
+ push(a);
+ push(a);
+ break;
+ case Bytecodes::_dup_x1:
+ a = pop();
+ b = pop();
+ push(a);
+ push(b);
+ push(a);
+ break;
+ case Bytecodes::_dup_x2:
+ a = pop();
+ b = pop();
+ c = pop();
+ push(a);
+ push(c);
+ push(b);
+ push(a);
+ break;
+ case Bytecodes::_dup2:
+ a = pop();
+ b = pop();
+ push(b);
+ push(a);
+ push(b);
+ push(a);
+ break;
+ case Bytecodes::_dup2_x1:
+ a = pop();
+ b = pop();
+ c = pop();
+ push(b);
+ push(a);
+ push(c);
+ push(b);
+ push(a);
+ break;
+ case Bytecodes::_dup2_x2:
+ a = pop();
+ b = pop();
+ c = pop();
+ d = pop();
+ push(b);
+ push(a);
+ push(d);
+ push(c);
+ push(b);
+ push(a);
+ break;
+
+ case Bytecodes::_getfield:
+ if (!do_getfield())
+ return false;
+ break;
+ case Bytecodes::_getstatic:
+ if (!do_getstatic())
+ return false;
+ break;
+ case Bytecodes::_putfield:
+ if (!do_putfield())
+ return false;
+ break;
+
+ case Bytecodes::_iadd:
+ case Bytecodes::_isub:
+ case Bytecodes::_imul:
+ case Bytecodes::_iand:
+ case Bytecodes::_ixor:
+ case Bytecodes::_ishl:
+ case Bytecodes::_ishr:
+ case Bytecodes::_iushr:
+ pop();
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_ior:
+ a = pop();
+ b = pop();
+ push(a && b);
+ break;
+ case Bytecodes::_idiv:
+ case Bytecodes::_irem:
+ if (!pop())
+ return false;
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_ineg:
+ break;
+
+ case Bytecodes::_ladd:
+ case Bytecodes::_lsub:
+ case Bytecodes::_lmul:
+ case Bytecodes::_land:
+ case Bytecodes::_lxor:
+ pop();
+ pop();
+ pop();
+ pop();
+ push(false);
+ push(false);
+ break;
+ case Bytecodes::_lor:
+ a = pop();
+ b = pop();
+ push(a && b);
+ break;
+ case Bytecodes::_ldiv:
+ case Bytecodes::_lrem:
+ pop();
+ if (!pop())
+ return false;
+ pop();
+ pop();
+ push(false);
+ push(false);
+ break;
+ case Bytecodes::_lneg:
+ break;
+ case Bytecodes::_lshl:
+ case Bytecodes::_lshr:
+ case Bytecodes::_lushr:
+ pop();
+ pop();
+ pop();
+ push(false);
+ push(false);
+ break;
+
+ case Bytecodes::_fadd:
+ case Bytecodes::_fsub:
+ case Bytecodes::_fmul:
+ case Bytecodes::_fdiv:
+ case Bytecodes::_frem:
+ pop();
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_fneg:
+ break;
+
+ case Bytecodes::_dadd:
+ case Bytecodes::_dsub:
+ case Bytecodes::_dmul:
+ case Bytecodes::_ddiv:
+ case Bytecodes::_drem:
+ pop();
+ pop();
+ pop();
+ pop();
+ push(false);
+ push(false);
+ break;
+ case Bytecodes::_dneg:
+ break;
+
+ case Bytecodes::_iinc:
+ set_local(iter()->get_index(), false);
+ break;
+
+ case Bytecodes::_lcmp:
+ pop();
+ pop();
+ pop();
+ pop();
+ push(false);
+ break;
+
+ case Bytecodes::_fcmpl:
+ case Bytecodes::_fcmpg:
+ pop();
+ pop();
+ push(false);
+ break;
+
+ case Bytecodes::_dcmpl:
+ case Bytecodes::_dcmpg:
+ pop();
+ pop();
+ pop();
+ pop();
+ push(false);
+ break;
+
+ case Bytecodes::_i2l:
+ push(false);
+ break;
+ case Bytecodes::_i2f:
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_i2d:
+ pop();
+ push(false);
+ push(false);
+ break;
+
+ case Bytecodes::_l2i:
+ case Bytecodes::_l2f:
+ pop();
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_l2d:
+ pop();
+ pop();
+ push(false);
+ push(false);
+ break;
+
+ case Bytecodes::_f2i:
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_f2l:
+ case Bytecodes::_f2d:
+ pop();
+ push(false);
+ push(false);
+ break;
+
+ case Bytecodes::_d2i:
+ case Bytecodes::_d2f:
+ pop();
+ pop();
+ push(false);
+ break;
+ case Bytecodes::_d2l:
+ pop();
+ pop();
+ push(false);
+ push(false);
+ break;
+
+ case Bytecodes::_i2b:
+ case Bytecodes::_i2c:
+ case Bytecodes::_i2s:
+ pop();
+ push(false);
+ break;
+
+ case Bytecodes::_return:
+ case Bytecodes::_ireturn:
+ case Bytecodes::_lreturn:
+ case Bytecodes::_freturn:
+ case Bytecodes::_dreturn:
+ case Bytecodes::_areturn:
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void SharkInlinerHelper::initialize_for_check() {
+ _locals = NEW_RESOURCE_ARRAY(bool, max_locals());
+ _stack = NEW_RESOURCE_ARRAY(bool, max_stack());
+
+ memset(_locals, 0, max_locals() * sizeof(bool));
+ for (int i = 0; i < target()->arg_size(); i++) {
+ SharkValue *arg = entry_state()->stack(target()->arg_size() - 1 - i);
+ if (arg && arg->zero_checked())
+ set_local(i, true);
+ }
+
+ _sp = _stack;
+}
+
+bool SharkInlinerHelper::do_field_access(bool is_get, bool is_field) {
+ assert(is_get || is_field, "can't inline putstatic");
+
+ // If the holder isn't linked then there isn't a lot we can do
+ if (!target()->holder()->is_linked())
+ return false;
+
+ // Get the field
+ bool will_link;
+ ciField *field = iter()->get_field(will_link);
+ if (!will_link)
+ return false;
+
+ // If the field is mismatched then an exception needs throwing
+ if (is_field == field->is_static())
+ return false;
+
+ // Pop the value off the stack if necessary
+ if (!is_get) {
+ pop();
+ if (field->type()->is_two_word())
+ pop();
+ }
+
+ // Pop and null-check the receiver if necessary
+ if (is_field) {
+ if (!pop())
+ return false;
+ }
+
+ // Push the result if necessary
+ if (is_get) {
+ bool result_pushed = false;
+ if (field->is_constant()) {
+ SharkConstant *sc = SharkConstant::for_field(iter());
+ if (sc->is_loaded()) {
+ push(sc->is_nonzero());
+ result_pushed = true;
+ }
+ }
+
+ if (!result_pushed)
+ push(false);
+
+ if (field->type()->is_two_word())
+ push(false);
+ }
+
+ return true;
+}
+
+bool SharkInliner::attempt_inline(ciMethod *target, SharkState *state) {
+ if (SharkIntrinsics::is_intrinsic(target)) {
+ SharkIntrinsics::inline_intrinsic(target, state);
+ return true;
+ }
+
+ if (may_be_inlinable(target)) {
+ SharkInlinerHelper inliner(target, state);
+ if (inliner.is_inlinable()) {
+ inliner.do_inline();
+ return true;
+ }
+ }
+ return false;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkInliner.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkInliner : public AllStatic {
+ public:
+ static bool attempt_inline(ciMethod* target, SharkState* state);
+
+ private:
+ static bool may_be_inlinable(ciMethod* target);
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkIntrinsics.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkIntrinsics.cpp.incl"
+
+using namespace llvm;
+
+bool SharkIntrinsics::is_intrinsic(ciMethod *target) {
+ switch (target->intrinsic_id()) {
+ case vmIntrinsics::_none:
+ return false;
+
+ // java.lang.Math
+ case vmIntrinsics::_min:
+ case vmIntrinsics::_max:
+ case vmIntrinsics::_dabs:
+ case vmIntrinsics::_dsin:
+ case vmIntrinsics::_dcos:
+ case vmIntrinsics::_dtan:
+ case vmIntrinsics::_datan2:
+ case vmIntrinsics::_dsqrt:
+ case vmIntrinsics::_dlog:
+ case vmIntrinsics::_dlog10:
+ case vmIntrinsics::_dpow:
+ case vmIntrinsics::_dexp:
+ return true;
+
+ // java.lang.Object
+ case vmIntrinsics::_getClass:
+ return true;
+
+ // java.lang.System
+ case vmIntrinsics::_currentTimeMillis:
+ return true;
+
+ // java.lang.Thread
+ case vmIntrinsics::_currentThread:
+ return true;
+
+ // sun.misc.Unsafe
+ case vmIntrinsics::_compareAndSwapInt:
+ return true;
+
+ default:
+ if (SharkPerformanceWarnings) {
+ warning(
+ "unhandled intrinsic vmIntrinsic::%s",
+ vmIntrinsics::name_at(target->intrinsic_id()));
+ }
+ }
+ return false;
+}
+
+void SharkIntrinsics::inline_intrinsic(ciMethod *target, SharkState *state) {
+ SharkIntrinsics intrinsic(state, target);
+ intrinsic.do_intrinsic();
+}
+
+void SharkIntrinsics::do_intrinsic() {
+ switch (target()->intrinsic_id()) {
+ // java.lang.Math
+ case vmIntrinsics::_min:
+ do_Math_minmax(llvm::ICmpInst::ICMP_SLE);
+ break;
+ case vmIntrinsics::_max:
+ do_Math_minmax(llvm::ICmpInst::ICMP_SGE);
+ break;
+ case vmIntrinsics::_dabs:
+ do_Math_1to1(builder()->fabs());
+ break;
+ case vmIntrinsics::_dsin:
+ do_Math_1to1(builder()->sin());
+ break;
+ case vmIntrinsics::_dcos:
+ do_Math_1to1(builder()->cos());
+ break;
+ case vmIntrinsics::_dtan:
+ do_Math_1to1(builder()->tan());
+ break;
+ case vmIntrinsics::_datan2:
+ do_Math_2to1(builder()->atan2());
+ break;
+ case vmIntrinsics::_dsqrt:
+ do_Math_1to1(builder()->sqrt());
+ break;
+ case vmIntrinsics::_dlog:
+ do_Math_1to1(builder()->log());
+ break;
+ case vmIntrinsics::_dlog10:
+ do_Math_1to1(builder()->log10());
+ break;
+ case vmIntrinsics::_dpow:
+ do_Math_2to1(builder()->pow());
+ break;
+ case vmIntrinsics::_dexp:
+ do_Math_1to1(builder()->exp());
+ break;
+
+ // java.lang.Object
+ case vmIntrinsics::_getClass:
+ do_Object_getClass();
+ break;
+
+ // java.lang.System
+ case vmIntrinsics::_currentTimeMillis:
+ do_System_currentTimeMillis();
+ break;
+
+ // java.lang.Thread
+ case vmIntrinsics::_currentThread:
+ do_Thread_currentThread();
+ break;
+
+ // sun.misc.Unsafe
+ case vmIntrinsics::_compareAndSwapInt:
+ do_Unsafe_compareAndSwapInt();
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+void SharkIntrinsics::do_Math_minmax(ICmpInst::Predicate p) {
+ // Pop the arguments
+ SharkValue *sb = state()->pop();
+ SharkValue *sa = state()->pop();
+ Value *a = sa->jint_value();
+ Value *b = sb->jint_value();
+
+ // Perform the test
+ BasicBlock *ip = builder()->GetBlockInsertionPoint();
+ BasicBlock *return_a = builder()->CreateBlock(ip, "return_a");
+ BasicBlock *return_b = builder()->CreateBlock(ip, "return_b");
+ BasicBlock *done = builder()->CreateBlock(ip, "done");
+
+ builder()->CreateCondBr(builder()->CreateICmp(p, a, b), return_a, return_b);
+
+ builder()->SetInsertPoint(return_a);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(return_b);
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(done);
+ PHINode *phi = builder()->CreatePHI(a->getType(), "result");
+ phi->addIncoming(a, return_a);
+ phi->addIncoming(b, return_b);
+
+ // Push the result
+ state()->push(
+ SharkValue::create_jint(
+ phi,
+ sa->zero_checked() && sb->zero_checked()));
+}
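+
+// For illustration, the diamond built above for Math.min lowers to IR of
+// roughly this shape (value and block names are only indicative):
+//
+//     %cmp = icmp sle i32 %a, %b
+//     br i1 %cmp, label %return_a, label %return_b
+//   return_a:
+//     br label %done
+//   return_b:
+//     br label %done
+//   done:
+//     %result = phi i32 [ %a, %return_a ], [ %b, %return_b ]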
+
+void SharkIntrinsics::do_Math_1to1(Value *function) {
+ SharkValue *empty = state()->pop();
+ assert(empty == NULL, "should be");
+ state()->push(
+ SharkValue::create_jdouble(
+ builder()->CreateCall(
+ function, state()->pop()->jdouble_value())));
+ state()->push(NULL);
+}
+
+void SharkIntrinsics::do_Math_2to1(Value *function) {
+ SharkValue *empty = state()->pop();
+ assert(empty == NULL, "should be");
+ Value *y = state()->pop()->jdouble_value();
+ empty = state()->pop();
+ assert(empty == NULL, "should be");
+ Value *x = state()->pop()->jdouble_value();
+
+ state()->push(
+ SharkValue::create_jdouble(
+ builder()->CreateCall2(function, x, y)));
+ state()->push(NULL);
+}
+
+void SharkIntrinsics::do_Object_getClass() {
+ Value *klass = builder()->CreateValueOfStructEntry(
+ state()->pop()->jobject_value(),
+ in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ SharkType::oop_type(),
+ "klass");
+
+ Value *klass_part = builder()->CreateAddressOfStructEntry(
+ klass,
+ in_ByteSize(klassOopDesc::klass_part_offset_in_bytes()),
+ SharkType::klass_type(),
+ "klass_part");
+
+ state()->push(
+ SharkValue::create_jobject(
+ builder()->CreateValueOfStructEntry(
+ klass_part,
+ in_ByteSize(Klass::java_mirror_offset_in_bytes()),
+ SharkType::oop_type(),
+ "java_mirror"),
+ true));
+}
+
+void SharkIntrinsics::do_System_currentTimeMillis() {
+ state()->push(
+ SharkValue::create_jlong(
+ builder()->CreateCall(builder()->current_time_millis()),
+ false));
+ state()->push(NULL);
+}
+
+void SharkIntrinsics::do_Thread_currentThread() {
+ state()->push(
+ SharkValue::create_jobject(
+ builder()->CreateValueOfStructEntry(
+ thread(), JavaThread::threadObj_offset(),
+ SharkType::oop_type(),
+ "threadObj"),
+ true));
+}
+
+void SharkIntrinsics::do_Unsafe_compareAndSwapInt() {
+ // Pop the arguments
+ Value *x = state()->pop()->jint_value();
+ Value *e = state()->pop()->jint_value();
+ SharkValue *empty = state()->pop();
+ assert(empty == NULL, "should be");
+ Value *offset = state()->pop()->jlong_value();
+ Value *object = state()->pop()->jobject_value();
+ Value *unsafe = state()->pop()->jobject_value();
+
+ // Convert the offset
+ offset = builder()->CreateCall(
+ builder()->unsafe_field_offset_to_byte_offset(),
+ offset);
+
+ // Locate the field
+ Value *addr = builder()->CreateIntToPtr(
+ builder()->CreateAdd(
+ builder()->CreatePtrToInt(object, SharkType::intptr_type()),
+ builder()->CreateIntCast(offset, SharkType::intptr_type(), true)),
+ PointerType::getUnqual(SharkType::jint_type()),
+ "addr");
+
+ // Perform the operation
+ Value *result = builder()->CreateCmpxchgInt(x, addr, e);
+
+ // Push the result
+ state()->push(
+ SharkValue::create_jint(
+ builder()->CreateIntCast(
+ builder()->CreateICmpEQ(result, e), SharkType::jint_type(), true),
+ false));
+}
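+
+// At the Java level the sequence above corresponds, roughly, to
+//
+//   boolean Unsafe.compareAndSwapInt(Object o, long offset, int e, int x)
+//
+// with the operand stack popped in reverse argument order; the jint
+// pushed at the end is 1 if the field held e and was replaced by x,
+// and 0 otherwise.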
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkIntrinsics.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkIntrinsics : public SharkTargetInvariants {
+ public:
+ static bool is_intrinsic(ciMethod* target);
+ static void inline_intrinsic(ciMethod* target, SharkState* state);
+
+ private:
+ SharkIntrinsics(SharkState* state, ciMethod* target)
+ : SharkTargetInvariants(state, target), _state(state) {}
+
+ private:
+ SharkState* _state;
+
+ private:
+ SharkState* state() const {
+ return _state;
+ }
+
+ private:
+ void do_intrinsic();
+
+ private:
+ void do_Math_minmax(llvm::ICmpInst::Predicate p);
+ void do_Math_1to1(llvm::Value* function);
+ void do_Math_2to1(llvm::Value* function);
+ void do_Object_getClass();
+ void do_System_currentTimeMillis();
+ void do_Thread_currentThread();
+ void do_Unsafe_compareAndSwapInt();
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkInvariants.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkInvariants.cpp.incl"
+
+int SharkTargetInvariants::count_monitors() {
+ int result = 0;
+ if (is_synchronized() || target()->has_monitor_bytecodes()) {
+ for (int i = 0; i < flow()->block_count(); i++) {
+ result = MAX2(result, flow()->pre_order_at(i)->monitor_count());
+ }
+ }
+ return result;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkInvariants.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Base classes used to track various values through the compilation.
+// SharkCompileInvariants is used to track values which remain the
+// same for the top-level method and any inlined methods it may have
+// (i.e. for the whole compilation). SharkTargetInvariants is used to
+// track values which differ between methods.
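+//
+// For a concrete example from this change: SharkFunction and
+// SharkIntrinsics both derive from SharkTargetInvariants, so the ciEnv,
+// SharkBuilder and thread are shared across the whole compilation,
+// while target(), flow() and max_monitors() are tracked per method.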
+
+class SharkCompileInvariants : public ResourceObj {
+ protected:
+ SharkCompileInvariants(ciEnv* env, SharkBuilder* builder)
+ : _env(env),
+ _builder(builder),
+ _thread(NULL) {}
+
+ SharkCompileInvariants(const SharkCompileInvariants* parent)
+ : _env(parent->_env),
+ _builder(parent->_builder),
+ _thread(parent->_thread) {}
+
+ private:
+ ciEnv* _env;
+ SharkBuilder* _builder;
+ llvm::Value* _thread;
+
+ // Top-level broker for HotSpot's Compiler Interface.
+ //
+ // Its main purpose is to allow the various CI classes to access
+ // oops in the VM without having to worry about safepointing. In
+ // addition to this it acts as a holder for various recorders and
+ // memory allocators.
+ //
+ // Accessing this directly is kind of ugly, so it's private. Add
+ // new accessors below if you need something from it.
+ private:
+ ciEnv* env() const {
+ assert(_env != NULL, "env not available");
+ return _env;
+ }
+
+ // The SharkBuilder that is used to build LLVM IR.
+ protected:
+ SharkBuilder* builder() const {
+ return _builder;
+ }
+
+ // Pointer to this thread's JavaThread object. This is not
+ // available until a short way into SharkFunction creation
+ // so a setter is required. Assertions are used to enforce
+ // this invariant.
+ protected:
+ llvm::Value* thread() const {
+ assert(_thread != NULL, "thread not available");
+ return _thread;
+ }
+ void set_thread(llvm::Value* thread) {
+ assert(_thread == NULL, "thread already set");
+ _thread = thread;
+ }
+
+ // Objects that handle various aspects of the compilation.
+ protected:
+ DebugInformationRecorder* debug_info() const {
+ return env()->debug_info();
+ }
+ Dependencies* dependencies() const {
+ return env()->dependencies();
+ }
+ SharkCodeBuffer* code_buffer() const {
+ return builder()->code_buffer();
+ }
+
+ // Commonly used classes
+ protected:
+ ciInstanceKlass* java_lang_Object_klass() const {
+ return env()->Object_klass();
+ }
+ ciInstanceKlass* java_lang_Throwable_klass() const {
+ return env()->Throwable_klass();
+ }
+};
+
+class SharkTargetInvariants : public SharkCompileInvariants {
+ protected:
+ SharkTargetInvariants(ciEnv* env, SharkBuilder* builder, ciTypeFlow* flow)
+ : SharkCompileInvariants(env, builder),
+ _target(flow->method()),
+ _flow(flow),
+ _max_monitors(count_monitors()) {}
+
+ SharkTargetInvariants(const SharkCompileInvariants* parent, ciMethod* target)
+ : SharkCompileInvariants(parent),
+ _target(target),
+ _flow(NULL),
+ _max_monitors(count_monitors()) {}
+
+ SharkTargetInvariants(const SharkTargetInvariants* parent)
+ : SharkCompileInvariants(parent),
+ _target(parent->_target),
+ _flow(parent->_flow),
+ _max_monitors(parent->_max_monitors) {}
+
+ private:
+ int count_monitors();
+
+ private:
+ ciMethod* _target;
+ ciTypeFlow* _flow;
+ int _max_monitors;
+
+ // The method being compiled.
+ protected:
+ ciMethod* target() const {
+ return _target;
+ }
+
+ // Typeflow analysis of the method being compiled.
+ protected:
+ ciTypeFlow* flow() const {
+ assert(_flow != NULL, "typeflow not available");
+ return _flow;
+ }
+
+ // Properties of the method.
+ protected:
+ int max_locals() const {
+ return target()->max_locals();
+ }
+ int max_stack() const {
+ return target()->max_stack();
+ }
+ int max_monitors() const {
+ return _max_monitors;
+ }
+ int arg_size() const {
+ return target()->arg_size();
+ }
+ bool is_static() const {
+ return target()->is_static();
+ }
+ bool is_synchronized() const {
+ return target()->is_synchronized();
+ }
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkMemoryManager.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkMemoryManager.cpp.incl"
+
+using namespace llvm;
+
+void SharkMemoryManager::AllocateGOT() {
+ mm()->AllocateGOT();
+}
+
+unsigned char* SharkMemoryManager::getGOTBase() const {
+ return mm()->getGOTBase();
+}
+
+unsigned char* SharkMemoryManager::allocateStub(const GlobalValue* F,
+ unsigned StubSize,
+ unsigned Alignment) {
+ return mm()->allocateStub(F, StubSize, Alignment);
+}
+
+unsigned char* SharkMemoryManager::startFunctionBody(const Function* F,
+ uintptr_t& ActualSize) {
+ return mm()->startFunctionBody(F, ActualSize);
+}
+
+void SharkMemoryManager::endFunctionBody(const Function* F,
+ unsigned char* FunctionStart,
+ unsigned char* FunctionEnd) {
+ mm()->endFunctionBody(F, FunctionStart, FunctionEnd);
+
+ SharkEntry *entry = get_entry_for_function(F);
+ if (entry != NULL)
+ entry->set_code_limit(FunctionEnd);
+}
+
+unsigned char* SharkMemoryManager::startExceptionTable(const Function* F,
+ uintptr_t& ActualSize) {
+ return mm()->startExceptionTable(F, ActualSize);
+}
+
+void SharkMemoryManager::endExceptionTable(const Function* F,
+ unsigned char* TableStart,
+ unsigned char* TableEnd,
+ unsigned char* FrameRegister) {
+ mm()->endExceptionTable(F, TableStart, TableEnd, FrameRegister);
+}
+
+void SharkMemoryManager::setMemoryWritable() {
+ mm()->setMemoryWritable();
+}
+
+void SharkMemoryManager::setMemoryExecutable() {
+ mm()->setMemoryExecutable();
+}
+
+#if SHARK_LLVM_VERSION >= 27
+void SharkMemoryManager::deallocateExceptionTable(void *ptr) {
+ mm()->deallocateExceptionTable(ptr);
+}
+
+void SharkMemoryManager::deallocateFunctionBody(void *ptr) {
+ mm()->deallocateFunctionBody(ptr);
+}
+#else
+void SharkMemoryManager::deallocateMemForFunction(const Function* F) {
+ return mm()->deallocateMemForFunction(F);
+}
+#endif
+
+uint8_t* SharkMemoryManager::allocateGlobal(uintptr_t Size,
+ unsigned int Alignment) {
+ return mm()->allocateGlobal(Size, Alignment);
+}
+
+#if SHARK_LLVM_VERSION < 27
+void* SharkMemoryManager::getDlsymTable() const {
+ return mm()->getDlsymTable();
+}
+
+void SharkMemoryManager::SetDlsymTable(void *ptr) {
+ mm()->SetDlsymTable(ptr);
+}
+#endif
+
+void SharkMemoryManager::setPoisonMemory(bool poison) {
+ mm()->setPoisonMemory(poison);
+}
+
+unsigned char *SharkMemoryManager::allocateSpace(intptr_t Size,
+ unsigned int Alignment) {
+ return mm()->allocateSpace(Size, Alignment);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkMemoryManager.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// SharkMemoryManager wraps the LLVM JIT Memory Manager. We could use
+// this to run our own memory allocation policies, but for now all we
+// use it for is figuring out where the resulting native code ended up.
+
+class SharkMemoryManager : public llvm::JITMemoryManager {
+ public:
+ SharkMemoryManager()
+ : _mm(llvm::JITMemoryManager::CreateDefaultMemManager()) {}
+
+ private:
+ llvm::JITMemoryManager* _mm;
+
+ private:
+ llvm::JITMemoryManager* mm() const {
+ return _mm;
+ }
+
+ private:
+ std::map<const llvm::Function*, SharkEntry*> _entry_map;
+
+ public:
+ void set_entry_for_function(const llvm::Function* function,
+ SharkEntry* entry) {
+ _entry_map[function] = entry;
+ }
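+  // Note that operator[] default-constructs a NULL entry for functions
+  // that were never registered; endFunctionBody() relies on this when it
+  // checks the entry it gets back against NULL.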
+ SharkEntry* get_entry_for_function(const llvm::Function* function) {
+ return _entry_map[function];
+ }
+
+ public:
+ void AllocateGOT();
+ unsigned char* getGOTBase() const;
+ unsigned char* allocateStub(const llvm::GlobalValue* F,
+ unsigned StubSize,
+ unsigned Alignment);
+ unsigned char* startFunctionBody(const llvm::Function* F,
+ uintptr_t& ActualSize);
+ void endFunctionBody(const llvm::Function* F,
+ unsigned char* FunctionStart,
+ unsigned char* FunctionEnd);
+ unsigned char* startExceptionTable(const llvm::Function* F,
+ uintptr_t& ActualSize);
+ void endExceptionTable(const llvm::Function* F,
+ unsigned char* TableStart,
+ unsigned char* TableEnd,
+ unsigned char* FrameRegister);
+#if SHARK_LLVM_VERSION < 27
+ void* getDlsymTable() const;
+ void SetDlsymTable(void *ptr);
+#endif
+ void setPoisonMemory(bool);
+ uint8_t* allocateGlobal(uintptr_t, unsigned int);
+ void setMemoryWritable();
+ void setMemoryExecutable();
+#if SHARK_LLVM_VERSION >= 27
+ void deallocateExceptionTable(void *ptr);
+ void deallocateFunctionBody(void *ptr);
+#else
+ void deallocateMemForFunction(const llvm::Function* F);
+#endif
+ unsigned char *allocateSpace(intptr_t Size,
+ unsigned int Alignment);
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkNativeWrapper.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkNativeWrapper.cpp.incl"
+
+using namespace llvm;
+
+void SharkNativeWrapper::initialize(const char *name) {
+ // Create the function
+ _function = Function::Create(
+ SharkType::entry_point_type(),
+ GlobalVariable::InternalLinkage,
+ name);
+
+ // Get our arguments
+ Function::arg_iterator ai = function()->arg_begin();
+ Argument *method = ai++;
+ method->setName("method");
+ Argument *base_pc = ai++;
+ base_pc->setName("base_pc");
+ code_buffer()->set_base_pc(base_pc);
+ Argument *thread = ai++;
+ thread->setName("thread");
+ set_thread(thread);
+
+ // Create and push our stack frame
+ builder()->SetInsertPoint(CreateBlock());
+ _stack = SharkStack::CreateBuildAndPushFrame(this, method);
+ NOT_PRODUCT(method = NULL);
+
+ // Create the oopmap. We use the one oopmap for every call site in
+ // the wrapper, which results in the odd mild inefficiency but is a
+ // damn sight easier to code.
+ OopMap *oopmap = new OopMap(
+ SharkStack::oopmap_slot_munge(stack()->oopmap_frame_size()),
+ SharkStack::oopmap_slot_munge(arg_size()));
+ oopmap->set_oop(SharkStack::slot2reg(stack()->method_slot_offset()));
+
+ // Set up the oop_tmp slot if required:
+ // - For static methods we use it to handlize the class argument
+  //    for the call, and to protect it during slow path locking
+  //    (if the method is synchronized).
+ // - For methods returning oops, we use it to protect the return
+ // value across safepoints or slow path unlocking.
+ if (is_static() || is_returning_oop()) {
+ _oop_tmp_slot = stack()->slot_addr(
+ stack()->oop_tmp_slot_offset(),
+ SharkType::oop_type(),
+ "oop_tmp_slot");
+
+ oopmap->set_oop(SharkStack::slot2reg(stack()->oop_tmp_slot_offset()));
+ }
+
+ // Set up the monitor slot, for synchronized methods
+ if (is_synchronized()) {
+ Unimplemented();
+ _lock_slot_offset = 23;
+ }
+
+ // Start building the argument list
+ std::vector<const Type*> param_types;
+ std::vector<Value*> param_values;
+ const PointerType *box_type = PointerType::getUnqual(SharkType::oop_type());
+
+ // First argument is the JNIEnv
+ param_types.push_back(SharkType::jniEnv_type());
+ param_values.push_back(
+ builder()->CreateAddressOfStructEntry(
+ thread,
+ JavaThread::jni_environment_offset(),
+ SharkType::jniEnv_type(),
+ "jni_environment"));
+
+ // For static methods, the second argument is the class
+ if (is_static()) {
+ builder()->CreateStore(
+ builder()->CreateInlineOop(
+ JNIHandles::make_local(
+ target()->method_holder()->klass_part()->java_mirror())),
+ oop_tmp_slot());
+
+ param_types.push_back(box_type);
+ param_values.push_back(oop_tmp_slot());
+
+ _receiver_slot_offset = stack()->oop_tmp_slot_offset();
+ }
+ else if (is_returning_oop()) {
+ // The oop_tmp slot is registered in the oopmap,
+ // so we need to clear it. This is one of the
+ // mild inefficiencies I mentioned earlier.
+ builder()->CreateStore(LLVMValue::null(), oop_tmp_slot());
+ }
+
+ // Parse the arguments
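+  // Java locals are laid out in reverse within the frame: argument slot i
+  // lives at locals_slots_offset() + arg_size() - 1 - i, and two-slot
+  // values (longs and doubles) are loaded from the lower slot of the pair.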
+ for (int i = 0; i < arg_size(); i++) {
+ int slot_offset = stack()->locals_slots_offset() + arg_size() - 1 - i;
+ int adjusted_offset = slot_offset;
+ BasicBlock *null, *not_null, *merge;
+ Value *box;
+ PHINode *phi;
+
+ switch (arg_type(i)) {
+ case T_VOID:
+ break;
+
+ case T_OBJECT:
+ case T_ARRAY:
+ null = CreateBlock("null");
+ not_null = CreateBlock("not_null");
+ merge = CreateBlock("merge");
+
+ box = stack()->slot_addr(slot_offset, SharkType::oop_type());
+ builder()->CreateCondBr(
+ builder()->CreateICmp(
+ ICmpInst::ICMP_EQ,
+ builder()->CreateLoad(box),
+ LLVMValue::null()),
+ null, not_null);
+
+ builder()->SetInsertPoint(null);
+ builder()->CreateBr(merge);
+
+ builder()->SetInsertPoint(not_null);
+ builder()->CreateBr(merge);
+
+ builder()->SetInsertPoint(merge);
+ phi = builder()->CreatePHI(box_type, "boxed_object");
+ phi->addIncoming(ConstantPointerNull::get(box_type), null);
+ phi->addIncoming(box, not_null);
+ box = phi;
+
+ param_types.push_back(box_type);
+ param_values.push_back(box);
+
+ oopmap->set_oop(SharkStack::slot2reg(slot_offset));
+
+ if (i == 0 && !is_static())
+ _receiver_slot_offset = slot_offset;
+
+ break;
+
+ case T_LONG:
+ case T_DOUBLE:
+ adjusted_offset--;
+ // fall through
+
+ default:
+ const Type *param_type = SharkType::to_stackType(arg_type(i));
+
+ param_types.push_back(param_type);
+ param_values.push_back(
+ builder()->CreateLoad(stack()->slot_addr(adjusted_offset, param_type)));
+ }
+ }
+
+ // The oopmap is now complete, and everything is written
+ // into the frame except the PC.
+ int pc_offset = code_buffer()->create_unique_offset();
+
+ _oop_maps = new OopMapSet();
+ oop_maps()->add_gc_map(pc_offset, oopmap);
+
+ builder()->CreateStore(
+ builder()->code_buffer_address(pc_offset),
+ stack()->slot_addr(stack()->pc_slot_offset()));
+
+ // Set up the Java frame anchor
+ stack()->CreateSetLastJavaFrame();
+
+ // Lock if necessary
+ if (is_synchronized())
+ Unimplemented();
+
+ // Change the thread state to _thread_in_native
+ CreateSetThreadState(_thread_in_native);
+
+ // Make the call
+ BasicType result_type = target()->result_type();
+ const Type* return_type;
+ if (result_type == T_VOID)
+ return_type = SharkType::void_type();
+ else if (is_returning_oop())
+ return_type = box_type;
+ else
+ return_type = SharkType::to_arrayType(result_type);
+ Value* native_function = builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) target()->native_function()),
+ PointerType::getUnqual(
+ FunctionType::get(return_type, param_types, false)));
+ Value *result = builder()->CreateCall(
+ native_function, param_values.begin(), param_values.end());
+
+ // Start the transition back to _thread_in_Java
+ CreateSetThreadState(_thread_in_native_trans);
+
+ // Make sure new state is visible in the GC thread
+ if (os::is_MP()) {
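+    // With UseMembar we emit a real StoreLoad barrier; otherwise we
+    // write to HotSpot's memory serialize page, which the VM thread
+    // uses to serialize thread state reads without a per-call fence.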
+ if (UseMembar)
+ builder()->CreateMemoryBarrier(SharkBuilder::BARRIER_STORELOAD);
+ else
+ CreateWriteMemorySerializePage();
+ }
+
+ // Handle safepoint operations, pending suspend requests,
+ // and pending asynchronous exceptions.
+ BasicBlock *check_thread = CreateBlock("check_thread");
+ BasicBlock *do_safepoint = CreateBlock("do_safepoint");
+ BasicBlock *safepointed = CreateBlock("safepointed");
+
+ Value *global_state = builder()->CreateLoad(
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant(
+ (intptr_t) SafepointSynchronize::address_of_state()),
+ PointerType::getUnqual(SharkType::jint_type())),
+ "global_state");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(
+ global_state,
+ LLVMValue::jint_constant(SafepointSynchronize::_not_synchronized)),
+ do_safepoint, check_thread);
+
+ builder()->SetInsertPoint(check_thread);
+ Value *thread_state = builder()->CreateValueOfStructEntry(
+ thread,
+ JavaThread::suspend_flags_offset(),
+ SharkType::jint_type(),
+ "thread_state");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(
+ thread_state,
+ LLVMValue::jint_constant(0)),
+ do_safepoint, safepointed);
+
+ builder()->SetInsertPoint(do_safepoint);
+ builder()->CreateCall(
+ builder()->check_special_condition_for_native_trans(), thread);
+ builder()->CreateBr(safepointed);
+
+ // Finally we can change the thread state to _thread_in_Java
+ builder()->SetInsertPoint(safepointed);
+ CreateSetThreadState(_thread_in_Java);
+
+ // Clear the frame anchor
+ stack()->CreateResetLastJavaFrame();
+
+ // If there is a pending exception then we can just unwind and
+ // return. It seems totally wrong that unlocking is skipped here
+ // but apparently the template interpreter does this so we do too.
+ BasicBlock *exception = CreateBlock("exception");
+ BasicBlock *no_exception = CreateBlock("no_exception");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(
+ CreateLoadPendingException(),
+ LLVMValue::null()),
+ no_exception, exception);
+
+ builder()->SetInsertPoint(exception);
+ CreateResetHandleBlock();
+ stack()->CreatePopFrame(0);
+ builder()->CreateRet(LLVMValue::jint_constant(0));
+
+ builder()->SetInsertPoint(no_exception);
+
+ // If the result was an oop then unbox it before
+ // releasing the handle it might be protected by
+ if (is_returning_oop()) {
+ BasicBlock *null = builder()->GetInsertBlock();
+ BasicBlock *not_null = CreateBlock("not_null");
+ BasicBlock *merge = CreateBlock("merge");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(result, ConstantPointerNull::get(box_type)),
+ not_null, merge);
+
+ builder()->SetInsertPoint(not_null);
+ Value *unboxed_result = builder()->CreateLoad(result);
+ builder()->CreateBr(merge);
+
+ builder()->SetInsertPoint(merge);
+ PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), "result");
+ phi->addIncoming(LLVMValue::null(), null);
+ phi->addIncoming(unboxed_result, not_null);
+ result = phi;
+ }
+
+ // Reset handle block
+ CreateResetHandleBlock();
+
+ // Unlock if necessary.
+ if (is_synchronized())
+ Unimplemented();
+
+ // Unwind and return
+ Value *result_addr = stack()->CreatePopFrame(type2size[result_type]);
+ if (result_type != T_VOID) {
+ bool needs_cast = false;
+ bool is_signed = false;
+ switch (result_type) {
+ case T_BOOLEAN:
+ result = builder()->CreateICmpNE(result, LLVMValue::jbyte_constant(0));
+ needs_cast = true;
+ break;
+
+ case T_CHAR:
+ needs_cast = true;
+ break;
+
+ case T_BYTE:
+ case T_SHORT:
+ needs_cast = true;
+ is_signed = true;
+ break;
+ }
+ if (needs_cast) {
+ result = builder()->CreateIntCast(
+ result, SharkType::to_stackType(result_type), is_signed);
+ }
+
+ builder()->CreateStore(
+ result,
+ builder()->CreateIntToPtr(
+ result_addr,
+ PointerType::getUnqual(SharkType::to_stackType(result_type))));
+ }
+ builder()->CreateRet(LLVMValue::jint_constant(0));
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkNativeWrapper.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkNativeWrapper : public SharkCompileInvariants {
+ friend class SharkStackWithNativeFrame;
+
+ public:
+ static SharkNativeWrapper* build(SharkBuilder* builder,
+ methodHandle target,
+ const char* name,
+ BasicType* arg_types,
+ BasicType return_type) {
+ return new SharkNativeWrapper(builder,
+ target,
+ name,
+ arg_types,
+ return_type);
+ }
+
+ private:
+ SharkNativeWrapper(SharkBuilder* builder,
+ methodHandle target,
+ const char* name,
+ BasicType* arg_types,
+ BasicType return_type)
+ : SharkCompileInvariants(NULL, builder),
+ _target(target),
+ _arg_types(arg_types),
+ _return_type(return_type),
+ _lock_slot_offset(0) { initialize(name); }
+
+ private:
+ void initialize(const char* name);
+
+ private:
+ methodHandle _target;
+ BasicType* _arg_types;
+ BasicType _return_type;
+ llvm::Function* _function;
+ SharkStack* _stack;
+ llvm::Value* _oop_tmp_slot;
+ OopMapSet* _oop_maps;
+ int _receiver_slot_offset;
+ int _lock_slot_offset;
+
+ // The method being compiled.
+ protected:
+ methodHandle target() const {
+ return _target;
+ }
+
+ // Properties of the method.
+ protected:
+ int arg_size() const {
+ return target()->size_of_parameters();
+ }
+ BasicType arg_type(int i) const {
+ return _arg_types[i];
+ }
+ BasicType return_type() const {
+ return _return_type;
+ }
+ bool is_static() const {
+ return target()->is_static();
+ }
+ bool is_synchronized() const {
+ return target()->is_synchronized();
+ }
+ bool is_returning_oop() const {
+ return target()->is_returning_oop();
+ }
+
+ // The LLVM function we are building.
+ public:
+ llvm::Function* function() const {
+ return _function;
+ }
+
+ // The Zero stack and our frame on it.
+ protected:
+ SharkStack* stack() const {
+ return _stack;
+ }
+
+ // Temporary oop storage.
+ protected:
+ llvm::Value* oop_tmp_slot() const {
+ assert(is_static() || is_returning_oop(), "should be");
+ return _oop_tmp_slot;
+ }
+
+ // Information required by nmethod::new_native_nmethod().
+ public:
+ int frame_size() const {
+ return stack()->oopmap_frame_size();
+ }
+ ByteSize receiver_offset() const {
+ return in_ByteSize(_receiver_slot_offset * wordSize);
+ }
+ ByteSize lock_offset() const {
+ return in_ByteSize(_lock_slot_offset * wordSize);
+ }
+ OopMapSet* oop_maps() const {
+ return _oop_maps;
+ }
+
+ // Helpers.
+ private:
+ llvm::BasicBlock* CreateBlock(const char* name = "") const {
+ return llvm::BasicBlock::Create(SharkContext::current(), name, function());
+ }
+ llvm::Value* thread_state_address() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(), JavaThread::thread_state_offset(),
+ llvm::PointerType::getUnqual(SharkType::jint_type()),
+ "thread_state_address");
+ }
+ llvm::Value* pending_exception_address() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(), Thread::pending_exception_offset(),
+ llvm::PointerType::getUnqual(SharkType::oop_type()),
+ "pending_exception_address");
+ }
+ void CreateSetThreadState(JavaThreadState state) const {
+ builder()->CreateStore(
+ LLVMValue::jint_constant(state), thread_state_address());
+ }
+ void CreateWriteMemorySerializePage() const {
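+    // Compute the thread-specific offset into the serialize page from
+    // the thread pointer (shift and mask) and store a word at it.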
+ builder()->CreateStore(
+ LLVMValue::jint_constant(1),
+ builder()->CreateIntToPtr(
+ builder()->CreateAdd(
+ LLVMValue::intptr_constant(
+ (intptr_t) os::get_memory_serialize_page()),
+ builder()->CreateAnd(
+ builder()->CreateLShr(
+ builder()->CreatePtrToInt(thread(), SharkType::intptr_type()),
+ LLVMValue::intptr_constant(os::get_serialize_page_shift_count())),
+ LLVMValue::intptr_constant(os::get_serialize_page_mask()))),
+ llvm::PointerType::getUnqual(SharkType::jint_type())));
+ }
+ void CreateResetHandleBlock() const {
+ llvm::Value *active_handles = builder()->CreateValueOfStructEntry(
+ thread(),
+ JavaThread::active_handles_offset(),
+ SharkType::jniHandleBlock_type(),
+ "active_handles");
+ builder()->CreateStore(
+ LLVMValue::intptr_constant(0),
+ builder()->CreateAddressOfStructEntry(
+ active_handles,
+ in_ByteSize(JNIHandleBlock::top_offset_in_bytes()),
+ llvm::PointerType::getUnqual(SharkType::intptr_type()),
+ "top"));
+ }
+ llvm::LoadInst* CreateLoadPendingException() const {
+ return builder()->CreateLoad(
+ pending_exception_address(), "pending_exception");
+ }
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkRuntime.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkRuntime.cpp.incl"
+
+using namespace llvm;
+
+JRT_ENTRY(int, SharkRuntime::find_exception_handler(JavaThread* thread,
+ int* indexes,
+ int num_indexes))
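+  // 'indexes' holds the constant pool indexes of the candidate handlers'
+  // catch types; return the position of the first one that the exception
+  // on top of the stack is an instance of, or -1 if none match.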
+ constantPoolHandle pool(thread, method(thread)->constants());
+ KlassHandle exc_klass(thread, ((oop) tos_at(thread, 0))->klass());
+
+ for (int i = 0; i < num_indexes; i++) {
+ klassOop tmp = pool->klass_at(indexes[i], CHECK_0);
+ KlassHandle chk_klass(thread, tmp);
+
+ if (exc_klass() == chk_klass())
+ return i;
+
+ if (exc_klass()->klass_part()->is_subtype_of(chk_klass()))
+ return i;
+ }
+
+ return -1;
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::monitorenter(JavaThread* thread,
+ BasicObjectLock* lock))
+ if (PrintBiasedLockingStatistics)
+ Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
+
+ Handle object(thread, lock->obj());
+ assert(Universe::heap()->is_in_reserved_or_null(object()), "should be");
+ if (UseBiasedLocking) {
+ // Retry fast entry if bias is revoked to avoid unnecessary inflation
+ ObjectSynchronizer::fast_enter(object, lock->lock(), true, CHECK);
+ } else {
+ ObjectSynchronizer::slow_enter(object, lock->lock(), CHECK);
+ }
+ assert(Universe::heap()->is_in_reserved_or_null(lock->obj()), "should be");
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::monitorexit(JavaThread* thread,
+ BasicObjectLock* lock))
+ Handle object(thread, lock->obj());
+ assert(Universe::heap()->is_in_reserved_or_null(object()), "should be");
+ if (lock == NULL || object()->is_unlocked()) {
+ THROW(vmSymbols::java_lang_IllegalMonitorStateException());
+ }
+ ObjectSynchronizer::slow_exit(object(), lock->lock(), thread);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::new_instance(JavaThread* thread, int index))
+ klassOop k_oop = method(thread)->constants()->klass_at(index, CHECK);
+ instanceKlassHandle klass(THREAD, k_oop);
+
+ // Make sure we are not instantiating an abstract klass
+ klass->check_valid_for_instantiation(true, CHECK);
+
+ // Make sure klass is initialized
+ klass->initialize(CHECK);
+
+ // At this point the class may not be fully initialized
+ // because of recursive initialization. If it is fully
+ // initialized & has_finalized is not set, we rewrite
+ // it into its fast version (Note: no locking is needed
+ // here since this is an atomic byte write and can be
+ // done more than once).
+ //
+ // Note: In case of classes with has_finalized we don't
+ // rewrite since that saves us an extra check in
+ // the fast version which then would call the
+ // slow version anyway (and do a call back into
+ // Java).
+ // If we have a breakpoint, then we don't rewrite
+ // because the _breakpoint bytecode would be lost.
+ oop obj = klass->allocate_instance(CHECK);
+ thread->set_vm_result(obj);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::newarray(JavaThread* thread,
+ BasicType type,
+ int size))
+ oop obj = oopFactory::new_typeArray(type, size, CHECK);
+ thread->set_vm_result(obj);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::anewarray(JavaThread* thread,
+ int index,
+ int size))
+ klassOop klass = method(thread)->constants()->klass_at(index, CHECK);
+ objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
+ thread->set_vm_result(obj);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::multianewarray(JavaThread* thread,
+ int index,
+ int ndims,
+ int* dims))
+ klassOop klass = method(thread)->constants()->klass_at(index, CHECK);
+ oop obj = arrayKlass::cast(klass)->multi_allocate(ndims, dims, CHECK);
+ thread->set_vm_result(obj);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::register_finalizer(JavaThread* thread,
+ oop object))
+ assert(object->is_oop(), "should be");
+ assert(object->klass()->klass_part()->has_finalizer(), "should have");
+ instanceKlass::register_finalizer(instanceOop(object), CHECK);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::throw_ArithmeticException(JavaThread* thread,
+ const char* file,
+ int line))
+ Exceptions::_throw_msg(
+ thread, file, line,
+ vmSymbols::java_lang_ArithmeticException(),
+ "");
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::throw_ArrayIndexOutOfBoundsException(
+ JavaThread* thread,
+ const char* file,
+ int line,
+ int index))
+ char msg[jintAsStringSize];
+ snprintf(msg, sizeof(msg), "%d", index);
+ Exceptions::_throw_msg(
+ thread, file, line,
+ vmSymbols::java_lang_ArrayIndexOutOfBoundsException(),
+ msg);
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::throw_ClassCastException(JavaThread* thread,
+ const char* file,
+ int line))
+ Exceptions::_throw_msg(
+ thread, file, line,
+ vmSymbols::java_lang_ClassCastException(),
+ "");
+JRT_END
+
+JRT_ENTRY(void, SharkRuntime::throw_NullPointerException(JavaThread* thread,
+ const char* file,
+ int line))
+ Exceptions::_throw_msg(
+ thread, file, line,
+ vmSymbols::java_lang_NullPointerException(),
+ "");
+JRT_END
+
+// Non-VM calls
+// Nothing in these must ever GC!
+
+void SharkRuntime::dump(const char *name, intptr_t value) {
+ oop valueOop = (oop) value;
+ tty->print("%s = ", name);
+ if (valueOop->is_oop(true))
+ valueOop->print_on(tty);
+ else if (value >= ' ' && value <= '~')
+ tty->print("'%c' (%d)", value, value);
+ else
+ tty->print("%p", value);
+ tty->print_cr("");
+}
+
+bool SharkRuntime::is_subtype_of(klassOop check_klass, klassOop object_klass) {
+ return object_klass->klass_part()->is_subtype_of(check_klass);
+}
+
+int SharkRuntime::uncommon_trap(JavaThread* thread, int trap_request) {
+ Thread *THREAD = thread;
+
+ // In C2, uncommon_trap_blob creates a frame, so all the various
+ // deoptimization functions expect to find the frame of the method
+ // being deopted one frame down on the stack. We create a dummy
+ // frame to mirror this.
+ FakeStubFrame *stubframe = FakeStubFrame::build(CHECK_0);
+ thread->push_zero_frame(stubframe);
+
+ // Initiate the trap
+ thread->set_last_Java_frame();
+ Deoptimization::UnrollBlock *urb =
+ Deoptimization::uncommon_trap(thread, trap_request);
+ thread->reset_last_Java_frame();
+
+ // Pop our dummy frame and the frame being deoptimized
+ thread->pop_zero_frame();
+ thread->pop_zero_frame();
+
+ // Push skeleton frames
+ int number_of_frames = urb->number_of_frames();
+ for (int i = 0; i < number_of_frames; i++) {
+ intptr_t size = urb->frame_sizes()[i];
+ InterpreterFrame *frame = InterpreterFrame::build(size, CHECK_0);
+ thread->push_zero_frame(frame);
+ }
+
+ // Push another dummy frame
+ stubframe = FakeStubFrame::build(CHECK_0);
+ thread->push_zero_frame(stubframe);
+
+ // Fill in the skeleton frames
+ thread->set_last_Java_frame();
+ Deoptimization::unpack_frames(thread, Deoptimization::Unpack_uncommon_trap);
+ thread->reset_last_Java_frame();
+
+ // Pop our dummy frame
+ thread->pop_zero_frame();
+
+ // Fall back into the interpreter
+ return number_of_frames;
+}
+
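+// Push a minimal fake frame, just the next_frame link and the frame type
+// word, onto the Zero stack so that the deoptimizer sees the frame layout
+// it expects.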
+FakeStubFrame* FakeStubFrame::build(TRAPS) {
+ ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+ stack->overflow_check(header_words, CHECK_NULL);
+
+ stack->push(0); // next_frame, filled in later
+ intptr_t *fp = stack->sp();
+ assert(fp - stack->sp() == next_frame_off, "should be");
+
+ stack->push(FAKE_STUB_FRAME);
+ assert(fp - stack->sp() == frame_type_off, "should be");
+
+ return (FakeStubFrame *) fp;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkRuntime.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkRuntime : public AllStatic {
+ // VM calls
+ public:
+ static int find_exception_handler(JavaThread* thread,
+ int* indexes,
+ int num_indexes);
+
+ static void monitorenter(JavaThread* thread, BasicObjectLock* lock);
+ static void monitorexit(JavaThread* thread, BasicObjectLock* lock);
+
+ static void new_instance(JavaThread* thread, int index);
+ static void newarray(JavaThread* thread, BasicType type, int size);
+ static void anewarray(JavaThread* thread, int index, int size);
+ static void multianewarray(JavaThread* thread,
+ int index,
+ int ndims,
+ int* dims);
+
+ static void register_finalizer(JavaThread* thread, oop object);
+
+ static void throw_ArithmeticException(JavaThread* thread,
+ const char* file,
+ int line);
+ static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread,
+ const char* file,
+ int line,
+ int index);
+ static void throw_ClassCastException(JavaThread* thread,
+ const char* file,
+ int line);
+ static void throw_NullPointerException(JavaThread* thread,
+ const char* file,
+ int line);
+
+ // Helpers for VM calls
+ private:
+ static const SharkFrame* last_frame(JavaThread *thread) {
+ return thread->last_frame().zero_sharkframe();
+ }
+ static methodOop method(JavaThread *thread) {
+ return last_frame(thread)->method();
+ }
+ static address bcp(JavaThread *thread, int bci) {
+ return method(thread)->code_base() + bci;
+ }
+ static int two_byte_index(JavaThread *thread, int bci) {
+ return Bytes::get_Java_u2(bcp(thread, bci) + 1);
+ }
+ static intptr_t tos_at(JavaThread *thread, int offset) {
+ return *(thread->zero_stack()->sp() + offset);
+ }
+
+ // Non-VM calls
+ public:
+ static void dump(const char *name, intptr_t value);
+ static bool is_subtype_of(klassOop check_klass, klassOop object_klass);
+ static int uncommon_trap(JavaThread* thread, int trap_request);
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkStack.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkStack.cpp.incl"
+
+using namespace llvm;
+
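+// Build this method's frame on the Zero stack.  From low address to high,
+// the frame laid out below holds: the expression stack, the monitors, the
+// oop_tmp slot, the method pointer, the unextended SP, the PC, the frame
+// header (frame type and caller's frame pointer) and finally the locals.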
+void SharkStack::initialize(Value* method) {
+ bool setup_sp_and_method = (method != NULL);
+
+ int locals_words = max_locals();
+ int extra_locals = locals_words - arg_size();
+ int header_words = SharkFrame::header_words;
+ int monitor_words = max_monitors()*frame::interpreter_frame_monitor_size();
+ int stack_words = max_stack();
+ int frame_words = header_words + monitor_words + stack_words;
+
+ _extended_frame_size = frame_words + locals_words;
+
+ // Update the stack pointer
+ Value *stack_pointer = builder()->CreateSub(
+ CreateLoadStackPointer(),
+ LLVMValue::intptr_constant((frame_words + extra_locals) * wordSize));
+ CreateStackOverflowCheck(stack_pointer);
+ if (setup_sp_and_method)
+ CreateStoreStackPointer(stack_pointer);
+
+ // Create the frame
+ _frame = builder()->CreateIntToPtr(
+ stack_pointer,
+ PointerType::getUnqual(
+ ArrayType::get(SharkType::intptr_type(), extended_frame_size())),
+ "frame");
+ int offset = 0;
+
+ // Expression stack
+ _stack_slots_offset = offset;
+ offset += stack_words;
+
+ // Monitors
+ _monitors_slots_offset = offset;
+ offset += monitor_words;
+
+ // Temporary oop slot
+ _oop_tmp_slot_offset = offset++;
+
+ // Method pointer
+ _method_slot_offset = offset++;
+ if (setup_sp_and_method) {
+ builder()->CreateStore(
+ method, slot_addr(method_slot_offset(), SharkType::methodOop_type()));
+ }
+
+ // Unextended SP
+ builder()->CreateStore(stack_pointer, slot_addr(offset++));
+
+ // PC
+ _pc_slot_offset = offset++;
+
+ // Frame header
+ builder()->CreateStore(
+ LLVMValue::intptr_constant(ZeroFrame::SHARK_FRAME), slot_addr(offset++));
+ Value *fp = slot_addr(offset++);
+
+ // Local variables
+ _locals_slots_offset = offset;
+ offset += locals_words;
+
+ // Push the frame
+ assert(offset == extended_frame_size(), "should do");
+ builder()->CreateStore(CreateLoadFramePointer(), fp);
+ CreateStoreFramePointer(
+ builder()->CreatePtrToInt(fp, SharkType::intptr_type()));
+}
+
+// This function should match ZeroStack::overflow_check
+void SharkStack::CreateStackOverflowCheck(Value* sp) {
+ BasicBlock *zero_ok = CreateBlock("zero_stack_ok");
+ BasicBlock *overflow = CreateBlock("stack_overflow");
+ BasicBlock *abi_ok = CreateBlock("abi_stack_ok");
+
+ // Check the Zero stack
+ builder()->CreateCondBr(
+ builder()->CreateICmpULT(sp, stack_base()),
+ overflow, zero_ok);
+
+ // Check the ABI stack
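+  // Free ABI stack is the current native frame address minus the thread's
+  // stack limit (stack base minus stack size); bail out if less than the
+  // shadow zone would remain.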
+ builder()->SetInsertPoint(zero_ok);
+ Value *stack_top = builder()->CreateSub(
+ builder()->CreateValueOfStructEntry(
+ thread(),
+ Thread::stack_base_offset(),
+ SharkType::intptr_type(),
+ "abi_base"),
+ builder()->CreateValueOfStructEntry(
+ thread(),
+ Thread::stack_size_offset(),
+ SharkType::intptr_type(),
+ "abi_size"));
+ Value *free_stack = builder()->CreateSub(
+ builder()->CreatePtrToInt(
+ builder()->CreateGetFrameAddress(),
+ SharkType::intptr_type(),
+ "abi_sp"),
+ stack_top);
+ builder()->CreateCondBr(
+ builder()->CreateICmpULT(
+ free_stack,
+ LLVMValue::intptr_constant(StackShadowPages * os::vm_page_size())),
+ overflow, abi_ok);
+
+ // Handle overflows
+ builder()->SetInsertPoint(overflow);
+ builder()->CreateCall(builder()->throw_StackOverflowError(), thread());
+ builder()->CreateRet(LLVMValue::jint_constant(0));
+
+ builder()->SetInsertPoint(abi_ok);
+}
+
+Value* SharkStack::CreatePopFrame(int result_slots) {
+ assert(result_slots >= 0 && result_slots <= 2, "should be");
+ int locals_to_pop = max_locals() - result_slots;
+
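+  // The new stack pointer skips the saved caller frame pointer word and
+  // all locals except the ones holding the result, leaving the result
+  // where the caller's arguments used to be.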
+ Value *fp = CreateLoadFramePointer();
+ Value *sp = builder()->CreateAdd(
+ fp,
+ LLVMValue::intptr_constant((1 + locals_to_pop) * wordSize));
+
+ CreateStoreStackPointer(sp);
+ CreateStoreFramePointer(
+ builder()->CreateLoad(
+ builder()->CreateIntToPtr(
+ fp, PointerType::getUnqual(SharkType::intptr_type()))));
+
+ return sp;
+}
+
+Value* SharkStack::slot_addr(int offset,
+ const Type* type,
+ const char* name) const {
+ bool needs_cast = type && type != SharkType::intptr_type();
+
+ Value* result = builder()->CreateStructGEP(
+ _frame, offset, needs_cast ? "" : name);
+
+ if (needs_cast) {
+ result = builder()->CreateBitCast(
+ result, PointerType::getUnqual(type), name);
+ }
+ return result;
+}
+
+// The bits that differentiate stacks with normal and native frames on top
+
+SharkStack* SharkStack::CreateBuildAndPushFrame(SharkFunction* function,
+ Value* method) {
+ return new SharkStackWithNormalFrame(function, method);
+}
+SharkStack* SharkStack::CreateBuildAndPushFrame(SharkNativeWrapper* wrapper,
+ Value* method) {
+ return new SharkStackWithNativeFrame(wrapper, method);
+}
+
+SharkStackWithNormalFrame::SharkStackWithNormalFrame(SharkFunction* function,
+ Value* method)
+ : SharkStack(function), _function(function) {
+ // For normal frames, the stack pointer and the method slot will
+  // be set during each decache, so it is not necessary to set them
+  // when the frame is created.  However, we do set them for
+ // non-PRODUCT builds to make crash dumps easier to understand.
+ initialize(PRODUCT_ONLY(NULL) NOT_PRODUCT(method));
+}
+SharkStackWithNativeFrame::SharkStackWithNativeFrame(SharkNativeWrapper* wrp,
+ Value* method)
+ : SharkStack(wrp), _wrapper(wrp) {
+ initialize(method);
+}
+
+int SharkStackWithNormalFrame::arg_size() const {
+ return function()->arg_size();
+}
+int SharkStackWithNativeFrame::arg_size() const {
+ return wrapper()->arg_size();
+}
+
+int SharkStackWithNormalFrame::max_locals() const {
+ return function()->max_locals();
+}
+int SharkStackWithNativeFrame::max_locals() const {
+ return wrapper()->arg_size();
+}
+
+int SharkStackWithNormalFrame::max_stack() const {
+ return function()->max_stack();
+}
+int SharkStackWithNativeFrame::max_stack() const {
+ return 0;
+}
+
+int SharkStackWithNormalFrame::max_monitors() const {
+ return function()->max_monitors();
+}
+int SharkStackWithNativeFrame::max_monitors() const {
+ return wrapper()->is_synchronized() ? 1 : 0;
+}
+
+BasicBlock* SharkStackWithNormalFrame::CreateBlock(const char* name) const {
+ return function()->CreateBlock(name);
+}
+BasicBlock* SharkStackWithNativeFrame::CreateBlock(const char* name) const {
+ return wrapper()->CreateBlock(name);
+}
+
+address SharkStackWithNormalFrame::interpreter_entry_point() const {
+ return (address) CppInterpreter::normal_entry;
+}
+address SharkStackWithNativeFrame::interpreter_entry_point() const {
+ return (address) CppInterpreter::native_entry;
+}
+
+#ifndef PRODUCT
+void SharkStack::CreateAssertLastJavaSPIsNull() const {
+#ifdef ASSERT
+ BasicBlock *fail = CreateBlock("assert_failed");
+ BasicBlock *pass = CreateBlock("assert_ok");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(
+ builder()->CreateLoad(last_Java_sp_addr()),
+ LLVMValue::intptr_constant(0)),
+ pass, fail);
+
+ builder()->SetInsertPoint(fail);
+ builder()->CreateShouldNotReachHere(__FILE__, __LINE__);
+ builder()->CreateUnreachable();
+
+ builder()->SetInsertPoint(pass);
+#endif // ASSERT
+}
+#endif // !PRODUCT
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkStack.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkFunction;
+class SharkNativeWrapper;
+class SharkStackWithNormalFrame;
+class SharkStackWithNativeFrame;
+
+class SharkStack : public SharkCompileInvariants {
+ public:
+ static SharkStack* CreateBuildAndPushFrame(
+ SharkFunction* function, llvm::Value* method);
+ static SharkStack* CreateBuildAndPushFrame(
+ SharkNativeWrapper* wrapper, llvm::Value* method);
+
+ protected:
+ SharkStack(const SharkCompileInvariants* parent)
+ : SharkCompileInvariants(parent) {}
+
+ protected:
+ void initialize(llvm::Value* method);
+
+ protected:
+ void CreateStackOverflowCheck(llvm::Value* sp);
+
+ // Properties of the method being compiled
+ protected:
+ virtual int arg_size() const = 0;
+ virtual int max_locals() const = 0;
+ virtual int max_stack() const = 0;
+ virtual int max_monitors() const = 0;
+
+ // BasicBlock creation
+ protected:
+ virtual llvm::BasicBlock* CreateBlock(const char* name = "") const = 0;
+
+ // Interpreter entry point for bailouts
+ protected:
+ virtual address interpreter_entry_point() const = 0;
+
+ // Interface with the Zero stack
+ private:
+ llvm::Value* zero_stack() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(),
+ JavaThread::zero_stack_offset(),
+ SharkType::zeroStack_type(),
+ "zero_stack");
+ }
+ llvm::Value* stack_base() const {
+ return builder()->CreateValueOfStructEntry(
+ zero_stack(),
+ ZeroStack::base_offset(),
+ SharkType::intptr_type(),
+ "stack_base");
+ }
+ llvm::Value* stack_pointer_addr() const {
+ return builder()->CreateAddressOfStructEntry(
+ zero_stack(),
+ ZeroStack::sp_offset(),
+ llvm::PointerType::getUnqual(SharkType::intptr_type()),
+ "stack_pointer_addr");
+ }
+ llvm::Value* frame_pointer_addr() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(),
+ JavaThread::top_zero_frame_offset(),
+ llvm::PointerType::getUnqual(SharkType::intptr_type()),
+ "frame_pointer_addr");
+ }
+
+ public:
+ llvm::LoadInst* CreateLoadStackPointer(const char *name = "") {
+ return builder()->CreateLoad(stack_pointer_addr(), name);
+ }
+ llvm::StoreInst* CreateStoreStackPointer(llvm::Value* value) {
+ return builder()->CreateStore(value, stack_pointer_addr());
+ }
+ llvm::LoadInst* CreateLoadFramePointer(const char *name = "") {
+ return builder()->CreateLoad(frame_pointer_addr(), name);
+ }
+ llvm::StoreInst* CreateStoreFramePointer(llvm::Value* value) {
+ return builder()->CreateStore(value, frame_pointer_addr());
+ }
+ llvm::Value* CreatePopFrame(int result_slots);
+
+ // Interface with the frame anchor
+ private:
+ llvm::Value* last_Java_sp_addr() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(),
+ JavaThread::last_Java_sp_offset(),
+ llvm::PointerType::getUnqual(SharkType::intptr_type()),
+ "last_Java_sp_addr");
+ }
+ llvm::Value* last_Java_fp_addr() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(),
+ JavaThread::last_Java_fp_offset(),
+ llvm::PointerType::getUnqual(SharkType::intptr_type()),
+ "last_Java_fp_addr");
+ }
+
+ public:
+ void CreateSetLastJavaFrame() {
+ // Note that whenever _last_Java_sp != NULL other anchor fields
+ // must be valid. The profiler apparently depends on this.
+ NOT_PRODUCT(CreateAssertLastJavaSPIsNull());
+ builder()->CreateStore(CreateLoadFramePointer(), last_Java_fp_addr());
+ // XXX There's last_Java_pc as well, but I don't think anything uses it
+ // Also XXX: should we fence here? Zero doesn't...
+ builder()->CreateStore(CreateLoadStackPointer(), last_Java_sp_addr());
+ // Also also XXX: we could probably cache the sp (and the fp we know??)
+ }
+ void CreateResetLastJavaFrame() {
+ builder()->CreateStore(LLVMValue::intptr_constant(0), last_Java_sp_addr());
+ }
+
+ private:
+ void CreateAssertLastJavaSPIsNull() const PRODUCT_RETURN;
+
+ // Our method's frame
+ private:
+ llvm::Value* _frame;
+ int _extended_frame_size;
+ int _stack_slots_offset;
+
+ public:
+ int extended_frame_size() const {
+ return _extended_frame_size;
+ }
+ int oopmap_frame_size() const {
+ return extended_frame_size() - arg_size();
+ }
+
+ // Offsets of things in the frame
+ private:
+ int _monitors_slots_offset;
+ int _oop_tmp_slot_offset;
+ int _method_slot_offset;
+ int _pc_slot_offset;
+ int _locals_slots_offset;
+
+ public:
+ int stack_slots_offset() const {
+ return _stack_slots_offset;
+ }
+ int oop_tmp_slot_offset() const {
+ return _oop_tmp_slot_offset;
+ }
+ int method_slot_offset() const {
+ return _method_slot_offset;
+ }
+ int pc_slot_offset() const {
+ return _pc_slot_offset;
+ }
+ int locals_slots_offset() const {
+ return _locals_slots_offset;
+ }
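+  // Monitor slots are laid out in reverse index order within the monitor
+  // area; the byte offsets from BasicObjectLock and BasicLock are scaled
+  // down to word-sized slot indexes.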
+ int monitor_offset(int index) const {
+ assert(index >= 0 && index < max_monitors(), "invalid monitor index");
+ return _monitors_slots_offset +
+ (max_monitors() - 1 - index) * frame::interpreter_frame_monitor_size();
+ }
+ int monitor_object_offset(int index) const {
+ return monitor_offset(index) +
+ (BasicObjectLock::obj_offset_in_bytes() >> LogBytesPerWord);
+ }
+ int monitor_header_offset(int index) const {
+ return monitor_offset(index) +
+ ((BasicObjectLock::lock_offset_in_bytes() +
+ BasicLock::displaced_header_offset_in_bytes()) >> LogBytesPerWord);
+ }
+
+ // Addresses of things in the frame
+ public:
+ llvm::Value* slot_addr(int offset,
+ const llvm::Type* type = NULL,
+ const char* name = "") const;
+
+ llvm::Value* monitor_addr(int index) const {
+ return slot_addr(
+ monitor_offset(index),
+ SharkType::monitor_type(),
+ "monitor");
+ }
+ llvm::Value* monitor_object_addr(int index) const {
+ return slot_addr(
+ monitor_object_offset(index),
+ SharkType::oop_type(),
+ "object_addr");
+ }
+ llvm::Value* monitor_header_addr(int index) const {
+ return slot_addr(
+ monitor_header_offset(index),
+ SharkType::intptr_type(),
+ "displaced_header_addr");
+ }
+
+ // oopmap helpers
+ public:
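+  // Convert a word-sized frame slot count into the 32-bit slot units
+  // that OopMap and VMReg use (two per word on 64-bit, one on 32-bit).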
+ static int oopmap_slot_munge(int offset) {
+ return offset << (LogBytesPerWord - LogBytesPerInt);
+ }
+ static VMReg slot2reg(int offset) {
+ return VMRegImpl::stack2reg(oopmap_slot_munge(offset));
+ }
+};
+
+class SharkStackWithNormalFrame : public SharkStack {
+ friend class SharkStack;
+
+ protected:
+ SharkStackWithNormalFrame(SharkFunction* function, llvm::Value* method);
+
+ private:
+ SharkFunction* _function;
+
+ private:
+ SharkFunction* function() const {
+ return _function;
+ }
+
+ // Properties of the method being compiled
+ private:
+ int arg_size() const;
+ int max_locals() const;
+ int max_stack() const;
+ int max_monitors() const;
+
+ // BasicBlock creation
+ private:
+ llvm::BasicBlock* CreateBlock(const char* name = "") const;
+
+ // Interpreter entry point for bailouts
+ private:
+ address interpreter_entry_point() const;
+};
+
+class SharkStackWithNativeFrame : public SharkStack {
+ friend class SharkStack;
+
+ protected:
+ SharkStackWithNativeFrame(SharkNativeWrapper* wrapper, llvm::Value* method);
+
+ private:
+ SharkNativeWrapper* _wrapper;
+
+ private:
+ SharkNativeWrapper* wrapper() const {
+ return _wrapper;
+ }
+
+ // Properties of the method being compiled
+ private:
+ int arg_size() const;
+ int max_locals() const;
+ int max_stack() const;
+ int max_monitors() const;
+
+ // BasicBlock creation
+ private:
+ llvm::BasicBlock* CreateBlock(const char* name = "") const;
+
+ // Interpreter entry point for bailouts
+ private:
+ address interpreter_entry_point() const;
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkState.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkState.cpp.incl"
+
+using namespace llvm;
+
+void SharkState::initialize(const SharkState *state) {
+ _locals = NEW_RESOURCE_ARRAY(SharkValue*, max_locals());
+ _stack = NEW_RESOURCE_ARRAY(SharkValue*, max_stack());
+
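+  // In debug builds, poison both arrays so that any slot read before it
+  // is initialized shows up as an obviously bogus pointer.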
+ NOT_PRODUCT(memset(_locals, 23, max_locals() * sizeof(SharkValue *)));
+ NOT_PRODUCT(memset(_stack, 23, max_stack() * sizeof(SharkValue *)));
+ _sp = _stack;
+
+ if (state) {
+ for (int i = 0; i < max_locals(); i++) {
+ SharkValue *value = state->local(i);
+ if (value)
+ value = value->clone();
+ set_local(i, value);
+ }
+
+ for (int i = state->stack_depth() - 1; i >= 0; i--) {
+ SharkValue *value = state->stack(i);
+ if (value)
+ value = value->clone();
+ push(value);
+ }
+ }
+
+ set_num_monitors(state ? state->num_monitors() : 0);
+}
+
+bool SharkState::equal_to(SharkState *other) {
+ if (target() != other->target())
+ return false;
+
+ if (method() != other->method())
+ return false;
+
+ if (oop_tmp() != other->oop_tmp())
+ return false;
+
+ if (max_locals() != other->max_locals())
+ return false;
+
+ if (stack_depth() != other->stack_depth())
+ return false;
+
+ if (num_monitors() != other->num_monitors())
+ return false;
+
+ if (has_safepointed() != other->has_safepointed())
+ return false;
+
+ // Local variables
+ for (int i = 0; i < max_locals(); i++) {
+ SharkValue *value = local(i);
+ SharkValue *other_value = other->local(i);
+
+ if (value == NULL) {
+ if (other_value != NULL)
+ return false;
+ }
+ else {
+ if (other_value == NULL)
+ return false;
+
+ if (!value->equal_to(other_value))
+ return false;
+ }
+ }
+
+ // Expression stack
+ for (int i = 0; i < stack_depth(); i++) {
+ SharkValue *value = stack(i);
+ SharkValue *other_value = other->stack(i);
+
+ if (value == NULL) {
+ if (other_value != NULL)
+ return false;
+ }
+ else {
+ if (other_value == NULL)
+ return false;
+
+ if (!value->equal_to(other_value))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void SharkState::merge(SharkState* other,
+ BasicBlock* other_block,
+ BasicBlock* this_block) {
+ // Method
+ Value *this_method = this->method();
+ Value *other_method = other->method();
+ if (this_method != other_method) {
+ PHINode *phi = builder()->CreatePHI(SharkType::methodOop_type(), "method");
+ phi->addIncoming(this_method, this_block);
+ phi->addIncoming(other_method, other_block);
+ set_method(phi);
+ }
+
+ // Temporary oop slot
+ Value *this_oop_tmp = this->oop_tmp();
+ Value *other_oop_tmp = other->oop_tmp();
+ if (this_oop_tmp != other_oop_tmp) {
+ assert(this_oop_tmp && other_oop_tmp, "can't merge NULL with non-NULL");
+ PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), "oop_tmp");
+ phi->addIncoming(this_oop_tmp, this_block);
+ phi->addIncoming(other_oop_tmp, other_block);
+ set_oop_tmp(phi);
+ }
+
+ // Monitors
+ assert(this->num_monitors() == other->num_monitors(), "should be");
+
+ // Local variables
+ assert(this->max_locals() == other->max_locals(), "should be");
+ for (int i = 0; i < max_locals(); i++) {
+ SharkValue *this_value = this->local(i);
+ SharkValue *other_value = other->local(i);
+ assert((this_value == NULL) == (other_value == NULL), "should be");
+ if (this_value != NULL) {
+ char name[18];
+ snprintf(name, sizeof(name), "local_%d_", i);
+ set_local(i, this_value->merge(
+ builder(), other_value, other_block, this_block, name));
+ }
+ }
+
+ // Expression stack
+ assert(this->stack_depth() == other->stack_depth(), "should be");
+ for (int i = 0; i < stack_depth(); i++) {
+ SharkValue *this_value = this->stack(i);
+ SharkValue *other_value = other->stack(i);
+ assert((this_value == NULL) == (other_value == NULL), "should be");
+ if (this_value != NULL) {
+ char name[18];
+ snprintf(name, sizeof(name), "stack_%d_", i);
+ set_stack(i, this_value->merge(
+ builder(), other_value, other_block, this_block, name));
+ }
+ }
+
+ // Safepointed status
+ set_has_safepointed(this->has_safepointed() && other->has_safepointed());
+}
+
+void SharkState::replace_all(SharkValue* old_value, SharkValue* new_value) {
+ // Local variables
+ for (int i = 0; i < max_locals(); i++) {
+ if (local(i) == old_value)
+ set_local(i, new_value);
+ }
+
+ // Expression stack
+ for (int i = 0; i < stack_depth(); i++) {
+ if (stack(i) == old_value)
+ set_stack(i, new_value);
+ }
+}
+
+SharkNormalEntryState::SharkNormalEntryState(SharkTopLevelBlock* block,
+ Value* method)
+ : SharkState(block) {
+ assert(!block->stack_depth_at_entry(), "entry block shouldn't have stack");
+
+ // Local variables
+ for (int i = 0; i < max_locals(); i++) {
+ ciType *type = block->local_type_at_entry(i);
+
+ SharkValue *value = NULL;
+ switch (type->basic_type()) {
+ case T_INT:
+ case T_LONG:
+ case T_FLOAT:
+ case T_DOUBLE:
+ case T_OBJECT:
+ case T_ARRAY:
+ if (i >= arg_size()) {
+ ShouldNotReachHere();
+ }
+ value = SharkValue::create_generic(type, NULL, i == 0 && !is_static());
+ break;
+
+ case ciTypeFlow::StateVector::T_NULL:
+ value = SharkValue::null();
+ break;
+
+ case ciTypeFlow::StateVector::T_BOTTOM:
+ break;
+
+ case ciTypeFlow::StateVector::T_LONG2:
+ case ciTypeFlow::StateVector::T_DOUBLE2:
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ set_local(i, value);
+ }
+ SharkNormalEntryCacher(block->function(), method).scan(this);
+}
+
+SharkOSREntryState::SharkOSREntryState(SharkTopLevelBlock* block,
+ Value* method,
+ Value* osr_buf)
+ : SharkState(block) {
+ assert(!block->stack_depth_at_entry(), "entry block shouldn't have stack");
+ set_num_monitors(block->ciblock()->monitor_count());
+
+ // Local variables
+ for (int i = 0; i < max_locals(); i++) {
+ ciType *type = block->local_type_at_entry(i);
+
+ SharkValue *value = NULL;
+ switch (type->basic_type()) {
+ case T_INT:
+ case T_LONG:
+ case T_FLOAT:
+ case T_DOUBLE:
+ case T_OBJECT:
+ case T_ARRAY:
+ value = SharkValue::create_generic(type, NULL, false);
+ break;
+
+ case ciTypeFlow::StateVector::T_NULL:
+ value = SharkValue::null();
+ break;
+
+ case ciTypeFlow::StateVector::T_BOTTOM:
+ break;
+
+ case ciTypeFlow::StateVector::T_LONG2:
+ case ciTypeFlow::StateVector::T_DOUBLE2:
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ set_local(i, value);
+ }
+ SharkOSREntryCacher(block->function(), method, osr_buf).scan(this);
+}
+
+SharkPHIState::SharkPHIState(SharkTopLevelBlock* block)
+ : SharkState(block), _block(block) {
+ BasicBlock *saved_insert_point = builder()->GetInsertBlock();
+ builder()->SetInsertPoint(block->entry_block());
+ char name[18];
+
+ // Method
+ set_method(builder()->CreatePHI(SharkType::methodOop_type(), "method"));
+
+ // Local variables
+ for (int i = 0; i < max_locals(); i++) {
+ ciType *type = block->local_type_at_entry(i);
+ if (type->basic_type() == (BasicType) ciTypeFlow::StateVector::T_NULL) {
+ // XXX we could do all kinds of clever stuff here
+ type = ciType::make(T_OBJECT); // XXX what about T_ARRAY?
+ }
+
+ SharkValue *value = NULL;
+ switch (type->basic_type()) {
+ case T_INT:
+ case T_LONG:
+ case T_FLOAT:
+ case T_DOUBLE:
+ case T_OBJECT:
+ case T_ARRAY:
+ snprintf(name, sizeof(name), "local_%d_", i);
+ value = SharkValue::create_phi(
+ type, builder()->CreatePHI(SharkType::to_stackType(type), name));
+ break;
+
+ case T_ADDRESS:
+ value = SharkValue::address_constant(type->as_return_address()->bci());
+ break;
+
+ case ciTypeFlow::StateVector::T_BOTTOM:
+ break;
+
+ case ciTypeFlow::StateVector::T_LONG2:
+ case ciTypeFlow::StateVector::T_DOUBLE2:
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ set_local(i, value);
+ }
+
+ // Expression stack
+ for (int i = 0; i < block->stack_depth_at_entry(); i++) {
+ ciType *type = block->stack_type_at_entry(i);
+ if (type->basic_type() == (BasicType) ciTypeFlow::StateVector::T_NULL) {
+ // XXX we could do all kinds of clever stuff here
+ type = ciType::make(T_OBJECT); // XXX what about T_ARRAY?
+ }
+
+ SharkValue *value = NULL;
+ switch (type->basic_type()) {
+ case T_INT:
+ case T_LONG:
+ case T_FLOAT:
+ case T_DOUBLE:
+ case T_OBJECT:
+ case T_ARRAY:
+ snprintf(name, sizeof(name), "stack_%d_", i);
+ value = SharkValue::create_phi(
+ type, builder()->CreatePHI(SharkType::to_stackType(type), name));
+ break;
+
+ case T_ADDRESS:
+ value = SharkValue::address_constant(type->as_return_address()->bci());
+ break;
+
+ case ciTypeFlow::StateVector::T_LONG2:
+ case ciTypeFlow::StateVector::T_DOUBLE2:
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+ push(value);
+ }
+
+ // Monitors
+ set_num_monitors(block->ciblock()->monitor_count());
+
+ builder()->SetInsertPoint(saved_insert_point);
+}
+
+void SharkPHIState::add_incoming(SharkState* incoming_state) {
+ BasicBlock *predecessor = builder()->GetInsertBlock();
+
+ // Method
+ ((PHINode *) method())->addIncoming(incoming_state->method(), predecessor);
+
+ // Local variables
+ for (int i = 0; i < max_locals(); i++) {
+ if (local(i) != NULL)
+ local(i)->addIncoming(incoming_state->local(i), predecessor);
+ }
+
+ // Expression stack
+ int stack_depth = block()->stack_depth_at_entry();
+ assert(stack_depth == incoming_state->stack_depth(), "should be");
+ for (int i = 0; i < stack_depth; i++) {
+ assert((stack(i) == NULL) == (incoming_state->stack(i) == NULL), "oops");
+ if (stack(i))
+ stack(i)->addIncoming(incoming_state->stack(i), predecessor);
+ }
+
+ // Monitors
+ assert(num_monitors() == incoming_state->num_monitors(), "should be");
+
+ // Temporary oop slot
+ assert(oop_tmp() == incoming_state->oop_tmp(), "should be");
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkState.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkState : public SharkTargetInvariants {
+ public:
+ SharkState(const SharkTargetInvariants* parent)
+ : SharkTargetInvariants(parent),
+ _method(NULL),
+ _oop_tmp(NULL),
+ _has_safepointed(false) { initialize(NULL); }
+
+ SharkState(const SharkState* state)
+ : SharkTargetInvariants(state),
+ _method(state->_method),
+ _oop_tmp(state->_oop_tmp),
+ _has_safepointed(state->_has_safepointed) { initialize(state); }
+
+ private:
+ void initialize(const SharkState* state);
+
+ private:
+ llvm::Value* _method;
+ SharkValue** _locals;
+ SharkValue** _stack;
+ SharkValue** _sp;
+ int _num_monitors;
+ llvm::Value* _oop_tmp;
+ bool _has_safepointed;
+
+ // Method
+ public:
+ llvm::Value** method_addr() {
+ return &_method;
+ }
+ llvm::Value* method() const {
+ return _method;
+ }
+ protected:
+ void set_method(llvm::Value* method) {
+ _method = method;
+ }
+
+ // Local variables
+ public:
+ SharkValue** local_addr(int index) const {
+ assert(index >= 0 && index < max_locals(), "bad local variable index");
+ return &_locals[index];
+ }
+ SharkValue* local(int index) const {
+ return *local_addr(index);
+ }
+ void set_local(int index, SharkValue* value) {
+ *local_addr(index) = value;
+ }
+
+ // Expression stack
+ public:
+ SharkValue** stack_addr(int slot) const {
+ assert(slot >= 0 && slot < stack_depth(), "bad stack slot");
+ return &_sp[-(slot + 1)];
+ }
+ SharkValue* stack(int slot) const {
+ return *stack_addr(slot);
+ }
+ protected:
+ void set_stack(int slot, SharkValue* value) {
+ *stack_addr(slot) = value;
+ }
+ public:
+ int stack_depth() const {
+ return _sp - _stack;
+ }
+ void push(SharkValue* value) {
+ assert(stack_depth() < max_stack(), "stack overrun");
+ *(_sp++) = value;
+ }
+ SharkValue* pop() {
+ assert(stack_depth() > 0, "stack underrun");
+ return *(--_sp);
+ }
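+  // Worked example (for illustration): _sp points one past the top of
+  // the expression stack, so slot 0 is always the topmost value.  After
+  //   push(a); push(b); push(c);
+  // we have stack_depth() == 3, stack(0) == c, stack(1) == b and
+  // stack(2) == a, with stack_addr(0) == &_sp[-1].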
+
+ // Monitors
+ public:
+ int num_monitors() const {
+ return _num_monitors;
+ }
+ void set_num_monitors(int num_monitors) {
+ _num_monitors = num_monitors;
+ }
+
+ // Temporary oop slot
+ public:
+ llvm::Value** oop_tmp_addr() {
+ return &_oop_tmp;
+ }
+ llvm::Value* oop_tmp() const {
+ return _oop_tmp;
+ }
+ void set_oop_tmp(llvm::Value* oop_tmp) {
+ _oop_tmp = oop_tmp;
+ }
+
+ // Safepointed status
+ public:
+ bool has_safepointed() const {
+ return _has_safepointed;
+ }
+ void set_has_safepointed(bool has_safepointed) {
+ _has_safepointed = has_safepointed;
+ }
+
+ // Comparison
+ public:
+ bool equal_to(SharkState* other);
+
+ // Copy and merge
+ public:
+ SharkState* copy() const {
+ return new SharkState(this);
+ }
+ void merge(SharkState* other,
+ llvm::BasicBlock* other_block,
+ llvm::BasicBlock* this_block);
+
+ // Value replacement
+ public:
+ void replace_all(SharkValue* old_value, SharkValue* new_value);
+};
+
+class SharkTopLevelBlock;
+
+// SharkNormalEntryState objects are used to create the state
+// that the method will be entered with for a normal invocation.
+class SharkNormalEntryState : public SharkState {
+ public:
+ SharkNormalEntryState(SharkTopLevelBlock* block,
+ llvm::Value* method);
+};
+
+// SharkOSREntryState objects are used to create the state
+// that the method will be entered with for an OSR invocation.
+class SharkOSREntryState : public SharkState {
+ public:
+ SharkOSREntryState(SharkTopLevelBlock* block,
+ llvm::Value* method,
+ llvm::Value* osr_buf);
+};
+
+// SharkPHIState objects are used to manage the entry state
+// for blocks with more than one entry path or for blocks
+// entered from blocks that will be compiled later.
+class SharkPHIState : public SharkState {
+ public:
+ SharkPHIState(SharkTopLevelBlock* block);
+
+ private:
+ SharkTopLevelBlock* _block;
+
+ private:
+ SharkTopLevelBlock* block() const {
+ return _block;
+ }
+
+ public:
+ void add_incoming(SharkState* incoming_state);
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkStateScanner.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkStateScanner.cpp.incl"
+
+using namespace llvm;
+
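+// scan() presents a SharkState to the subclass callbacks one frame area
+// at a time: expression stack slots first, then monitors, then the frame
+// header (oop temp, method and pc slots), and finally the local
+// variables, passing each slot's word offset from the unextended_sp.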
+void SharkStateScanner::scan(SharkState* state) {
+ start_frame();
+
+ // Expression stack
+ stack_integrity_checks(state);
+ start_stack(state->stack_depth());
+ for (int i = state->stack_depth() - 1; i >= 0; i--) {
+ process_stack_slot(
+ i,
+ state->stack_addr(i),
+ stack()->stack_slots_offset() +
+ i + max_stack() - state->stack_depth());
+ }
+ end_stack();
+
+ // Monitors
+ start_monitors(state->num_monitors());
+ for (int i = 0; i < state->num_monitors(); i++) {
+ process_monitor(
+ i,
+ stack()->monitor_offset(i),
+ stack()->monitor_object_offset(i));
+ }
+ end_monitors();
+
+ // Frame header
+ start_frame_header();
+ process_oop_tmp_slot(
+ state->oop_tmp_addr(), stack()->oop_tmp_slot_offset());
+ process_method_slot(state->method_addr(), stack()->method_slot_offset());
+ process_pc_slot(stack()->pc_slot_offset());
+ end_frame_header();
+
+ // Local variables
+ locals_integrity_checks(state);
+ start_locals();
+ for (int i = 0; i < max_locals(); i++) {
+ process_local_slot(
+ i,
+ state->local_addr(i),
+ stack()->locals_slots_offset() + max_locals() - 1 - i);
+ }
+ end_locals();
+
+ end_frame();
+}
+
+#ifndef PRODUCT
+void SharkStateScanner::stack_integrity_checks(SharkState* state) {
+ for (int i = 0; i < state->stack_depth(); i++) {
+ if (state->stack(i)) {
+ if (state->stack(i)->is_two_word())
+ assert(state->stack(i - 1) == NULL, "should be");
+ }
+ else {
+ assert(state->stack(i + 1)->is_two_word(), "should be");
+ }
+ }
+}
+
+void SharkStateScanner::locals_integrity_checks(SharkState* state) {
+ for (int i = 0; i < max_locals(); i++) {
+ if (state->local(i)) {
+ if (state->local(i)->is_two_word())
+ assert(state->local(i + 1) == NULL, "should be");
+ }
+ }
+}
+#endif // !PRODUCT
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkStateScanner.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkState;
+
+class SharkStateScanner : public SharkTargetInvariants {
+ protected:
+ SharkStateScanner(SharkFunction* function)
+ : SharkTargetInvariants(function), _stack(function->stack()) {}
+
+ private:
+ SharkStack* _stack;
+
+ protected:
+ SharkStack* stack() const {
+ return _stack;
+ }
+
+ // Scan the frame
+ public:
+ void scan(SharkState* state);
+
+ // Callbacks
+  // Note that the offsets supplied to the various process_* callbacks
+  // are specified in words (of wordSize bytes) from the frame's
+  // unextended_sp.
+ protected:
+ virtual void start_frame() {}
+
+ virtual void start_stack(int stack_depth) {}
+ virtual void process_stack_slot(int index, SharkValue** value, int offset) {}
+ virtual void end_stack() {}
+
+ virtual void start_monitors(int num_monitors) {}
+ virtual void process_monitor(int index, int box_offset, int obj_offset) {}
+ virtual void end_monitors() {}
+
+ virtual void start_frame_header() {}
+ virtual void process_oop_tmp_slot(llvm::Value** value, int offset) {}
+ virtual void process_method_slot(llvm::Value** value, int offset) {}
+ virtual void process_pc_slot(int offset) {}
+ virtual void end_frame_header() {}
+
+ virtual void start_locals() {}
+ virtual void process_local_slot(int index, SharkValue** value, int offset) {}
+ virtual void end_locals() {}
+
+ virtual void end_frame() {}
+
+ // Integrity checks
+ private:
+ void stack_integrity_checks(SharkState* state) PRODUCT_RETURN;
+ void locals_integrity_checks(SharkState* state) PRODUCT_RETURN;
+};
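+
+// A minimal, purely illustrative scanner (hypothetical, not used anywhere
+// in this patch): it overrides a single callback and relies on the empty
+// defaults for the rest.
+//
+//   class SharkStackSlotPrinter : public SharkStateScanner {
+//    public:
+//     SharkStackSlotPrinter(SharkFunction* function)
+//       : SharkStateScanner(function) {}
+//
+//    protected:
+//     void process_stack_slot(int index, SharkValue** value, int offset) {
+//       tty->print_cr("stack slot %d is %d words above unextended_sp",
+//                     index, offset);
+//     }
+//   };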
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkTopLevelBlock.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,1995 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkTopLevelBlock.cpp.incl"
+
+using namespace llvm;
+
+void SharkTopLevelBlock::scan_for_traps() {
+ // If typeflow found a trap then don't scan past it
+ int limit_bci = ciblock()->has_trap() ? ciblock()->trap_bci() : limit();
+
+ // Scan the bytecode for traps that are always hit
+ iter()->reset_to_bci(start());
+ while (iter()->next_bci() < limit_bci) {
+ iter()->next();
+
+ ciField *field;
+ ciMethod *method;
+ ciInstanceKlass *klass;
+ bool will_link;
+ bool is_field;
+
+ switch (bc()) {
+ case Bytecodes::_ldc:
+ case Bytecodes::_ldc_w:
+ if (!SharkConstant::for_ldc(iter())->is_loaded()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret), bci());
+ return;
+ }
+ break;
+
+ case Bytecodes::_getfield:
+ case Bytecodes::_getstatic:
+ case Bytecodes::_putfield:
+ case Bytecodes::_putstatic:
+ field = iter()->get_field(will_link);
+ assert(will_link, "typeflow responsibility");
+ is_field = (bc() == Bytecodes::_getfield || bc() == Bytecodes::_putfield);
+
+ // If the bytecode does not match the field then bail out to
+ // the interpreter to throw an IncompatibleClassChangeError
+ if (is_field == field->is_static()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_unhandled,
+ Deoptimization::Action_none), bci());
+ return;
+ }
+
+ // Bail out if we are trying to access a static variable
+ // before the class initializer has completed.
+ if (!is_field && !field->holder()->is_initialized()) {
+ if (!static_field_ok_in_clinit(field)) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret), bci());
+ return;
+ }
+ }
+ break;
+
+ case Bytecodes::_invokestatic:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokevirtual:
+ case Bytecodes::_invokeinterface:
+ method = iter()->get_method(will_link);
+ assert(will_link, "typeflow responsibility");
+
+ if (!method->holder()->is_linked()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret), bci());
+ return;
+ }
+
+ if (bc() == Bytecodes::_invokevirtual) {
+ klass = ciEnv::get_instance_klass_for_declared_method_holder(
+ iter()->get_declared_method_holder());
+ if (!klass->is_linked()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret), bci());
+ return;
+ }
+ }
+ break;
+
+ case Bytecodes::_new:
+ klass = iter()->get_klass(will_link)->as_instance_klass();
+ assert(will_link, "typeflow responsibility");
+
+ // Bail out if the class is unloaded
+ if (iter()->is_unresolved_klass() || !klass->is_initialized()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret), bci());
+ return;
+ }
+
+ // Bail out if the class cannot be instantiated
+ if (klass->is_abstract() || klass->is_interface() ||
+ klass->name() == ciSymbol::java_lang_Class()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_unhandled,
+ Deoptimization::Action_reinterpret), bci());
+ return;
+ }
+ break;
+ }
+ }
+
+ // Trap if typeflow trapped (and we didn't before)
+ if (ciblock()->has_trap()) {
+ set_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_unloaded,
+ Deoptimization::Action_reinterpret,
+ ciblock()->trap_index()), ciblock()->trap_bci());
+ return;
+ }
+}
+
+bool SharkTopLevelBlock::static_field_ok_in_clinit(ciField* field) {
+ assert(field->is_static(), "should be");
+
+ // This code is lifted pretty much verbatim from C2's
+ // Parse::static_field_ok_in_clinit() in parse3.cpp.
+ bool access_OK = false;
+ if (target()->holder()->is_subclass_of(field->holder())) {
+ if (target()->is_static()) {
+ if (target()->name() == ciSymbol::class_initializer_name()) {
+ // It's OK to access static fields from the class initializer
+ access_OK = true;
+ }
+ }
+ else {
+ if (target()->name() == ciSymbol::object_initializer_name()) {
+ // It's also OK to access static fields inside a constructor,
+ // because any thread calling the constructor must first have
+ // synchronized on the class by executing a "new" bytecode.
+ access_OK = true;
+ }
+ }
+ }
+ return access_OK;
+}
+
+SharkState* SharkTopLevelBlock::entry_state() {
+ if (_entry_state == NULL) {
+ assert(needs_phis(), "should do");
+ _entry_state = new SharkPHIState(this);
+ }
+ return _entry_state;
+}
+
+void SharkTopLevelBlock::add_incoming(SharkState* incoming_state) {
+ if (needs_phis()) {
+ ((SharkPHIState *) entry_state())->add_incoming(incoming_state);
+ }
+ else if (_entry_state == NULL) {
+ _entry_state = incoming_state;
+ }
+ else {
+ assert(entry_state()->equal_to(incoming_state), "should be");
+ }
+}
+
+void SharkTopLevelBlock::enter(SharkTopLevelBlock* predecessor,
+ bool is_exception) {
+ // This block requires phis:
+ // - if it is entered more than once
+ // - if it is an exception handler, because in which
+ // case we assume it's entered more than once.
+ // - if the predecessor will be compiled after this
+ // block, in which case we can't simple propagate
+ // the state forward.
+ if (!needs_phis() &&
+ (entered() ||
+ is_exception ||
+ (predecessor && predecessor->index() >= index())))
+ _needs_phis = true;
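+  // For example, a loop header is reached both from the block that falls
+  // into it and from its backedge, so it needs phis; a block entered
+  // exactly once from an already-compiled predecessor simply adopts that
+  // predecessor's state in add_incoming() above.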
+
+ // Recurse into the tree
+ if (!entered()) {
+ _entered = true;
+
+ scan_for_traps();
+ if (!has_trap()) {
+ for (int i = 0; i < num_successors(); i++) {
+ successor(i)->enter(this, false);
+ }
+ }
+ compute_exceptions();
+ for (int i = 0; i < num_exceptions(); i++) {
+ SharkTopLevelBlock *handler = exception(i);
+ if (handler)
+ handler->enter(this, true);
+ }
+ }
+}
+
+void SharkTopLevelBlock::initialize() {
+ char name[28];
+ snprintf(name, sizeof(name),
+ "bci_%d%s",
+ start(), is_backedge_copy() ? "_backedge_copy" : "");
+ _entry_block = function()->CreateBlock(name);
+}
+
+void SharkTopLevelBlock::decache_for_Java_call(ciMethod *callee) {
+ SharkJavaCallDecacher(function(), bci(), callee).scan(current_state());
+ for (int i = 0; i < callee->arg_size(); i++)
+ xpop();
+}
+
+void SharkTopLevelBlock::cache_after_Java_call(ciMethod *callee) {
+ if (callee->return_type()->size()) {
+ ciType *type;
+ switch (callee->return_type()->basic_type()) {
+ case T_BOOLEAN:
+ case T_BYTE:
+ case T_CHAR:
+ case T_SHORT:
+ type = ciType::make(T_INT);
+ break;
+
+ default:
+ type = callee->return_type();
+ }
+
+ push(SharkValue::create_generic(type, NULL, false));
+ }
+ SharkJavaCallCacher(function(), callee).scan(current_state());
+}
+
+void SharkTopLevelBlock::decache_for_VM_call() {
+ SharkVMCallDecacher(function(), bci()).scan(current_state());
+}
+
+void SharkTopLevelBlock::cache_after_VM_call() {
+ SharkVMCallCacher(function()).scan(current_state());
+}
+
+void SharkTopLevelBlock::decache_for_trap() {
+ SharkTrapDecacher(function(), bci()).scan(current_state());
+}
+
+void SharkTopLevelBlock::emit_IR() {
+ builder()->SetInsertPoint(entry_block());
+
+ // Parse the bytecode
+ parse_bytecode(start(), limit());
+
+ // If this block falls through to the next then it won't have been
+ // terminated by a bytecode and we have to add the branch ourselves
+ if (falls_through() && !has_trap())
+ do_branch(ciTypeFlow::FALL_THROUGH);
+}
+
+SharkTopLevelBlock* SharkTopLevelBlock::bci_successor(int bci) const {
+ // XXX now with Linear Search Technology (tm)
+ for (int i = 0; i < num_successors(); i++) {
+ ciTypeFlow::Block *successor = ciblock()->successors()->at(i);
+ if (successor->start() == bci)
+ return function()->block(successor->pre_order());
+ }
+ ShouldNotReachHere();
+}
+
+void SharkTopLevelBlock::do_zero_check(SharkValue *value) {
+ if (value->is_phi() && value->as_phi()->all_incomers_zero_checked()) {
+ function()->add_deferred_zero_check(this, value);
+ }
+ else {
+ BasicBlock *continue_block = function()->CreateBlock("not_zero");
+ SharkState *saved_state = current_state();
+ set_current_state(saved_state->copy());
+ zero_check_value(value, continue_block);
+ builder()->SetInsertPoint(continue_block);
+ set_current_state(saved_state);
+ }
+
+ value->set_zero_checked(true);
+}
+
+void SharkTopLevelBlock::do_deferred_zero_check(SharkValue* value,
+ int bci,
+ SharkState* saved_state,
+ BasicBlock* continue_block) {
+ if (value->as_phi()->all_incomers_zero_checked()) {
+ builder()->CreateBr(continue_block);
+ }
+ else {
+ iter()->force_bci(start());
+ set_current_state(saved_state);
+ zero_check_value(value, continue_block);
+ }
+}
+
+void SharkTopLevelBlock::zero_check_value(SharkValue* value,
+ BasicBlock* continue_block) {
+ BasicBlock *zero_block = builder()->CreateBlock(continue_block, "zero");
+
+ Value *a, *b;
+ switch (value->basic_type()) {
+ case T_BYTE:
+ case T_CHAR:
+ case T_SHORT:
+ case T_INT:
+ a = value->jint_value();
+ b = LLVMValue::jint_constant(0);
+ break;
+ case T_LONG:
+ a = value->jlong_value();
+ b = LLVMValue::jlong_constant(0);
+ break;
+ case T_OBJECT:
+ case T_ARRAY:
+ a = value->jobject_value();
+    b = LLVMValue::null();
+ break;
+ default:
+ tty->print_cr("Unhandled type %s", type2name(value->basic_type()));
+ ShouldNotReachHere();
+ }
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(a, b), continue_block, zero_block);
+
+ builder()->SetInsertPoint(zero_block);
+ if (value->is_jobject()) {
+ call_vm(
+ builder()->throw_NullPointerException(),
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) __FILE__),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jint_constant(__LINE__),
+ EX_CHECK_NONE);
+ }
+ else {
+ call_vm(
+ builder()->throw_ArithmeticException(),
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) __FILE__),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jint_constant(__LINE__),
+ EX_CHECK_NONE);
+ }
+
+ Value *pending_exception = get_pending_exception();
+ clear_pending_exception();
+ handle_exception(pending_exception, EX_CHECK_FULL);
+}
+
+void SharkTopLevelBlock::check_bounds(SharkValue* array, SharkValue* index) {
+ BasicBlock *out_of_bounds = function()->CreateBlock("out_of_bounds");
+ BasicBlock *in_bounds = function()->CreateBlock("in_bounds");
+
+ Value *length = builder()->CreateArrayLength(array->jarray_value());
+  // We use an unsigned comparison to catch negative indexes: a negative
+  // index reinterpreted as unsigned is larger than any valid array length.
+ builder()->CreateCondBr(
+ builder()->CreateICmpULT(index->jint_value(), length),
+ in_bounds, out_of_bounds);
+
+ builder()->SetInsertPoint(out_of_bounds);
+ SharkState *saved_state = current_state()->copy();
+
+ call_vm(
+ builder()->throw_ArrayIndexOutOfBoundsException(),
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) __FILE__),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jint_constant(__LINE__),
+ index->jint_value(),
+ EX_CHECK_NONE);
+
+ Value *pending_exception = get_pending_exception();
+ clear_pending_exception();
+ handle_exception(pending_exception, EX_CHECK_FULL);
+
+ set_current_state(saved_state);
+
+ builder()->SetInsertPoint(in_bounds);
+}
+
+void SharkTopLevelBlock::check_pending_exception(int action) {
+ assert(action & EAM_CHECK, "should be");
+
+ BasicBlock *exception = function()->CreateBlock("exception");
+ BasicBlock *no_exception = function()->CreateBlock("no_exception");
+
+ Value *pending_exception = get_pending_exception();
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(pending_exception, LLVMValue::null()),
+ no_exception, exception);
+
+ builder()->SetInsertPoint(exception);
+ SharkState *saved_state = current_state()->copy();
+ if (action & EAM_MONITOR_FUDGE) {
+    // The top monitor is marked live, but the exception was thrown
+    // while setting it up, so we need to mark it dead before we enter
+    // any exception handlers, as they will not expect it to be there.
+ set_num_monitors(num_monitors() - 1);
+ action ^= EAM_MONITOR_FUDGE;
+ }
+ clear_pending_exception();
+ handle_exception(pending_exception, action);
+ set_current_state(saved_state);
+
+ builder()->SetInsertPoint(no_exception);
+}
+
+void SharkTopLevelBlock::compute_exceptions() {
+ ciExceptionHandlerStream str(target(), start());
+
+ int exc_count = str.count();
+ _exc_handlers = new GrowableArray<ciExceptionHandler*>(exc_count);
+ _exceptions = new GrowableArray<SharkTopLevelBlock*>(exc_count);
+
+ int index = 0;
+ for (; !str.is_done(); str.next()) {
+ ciExceptionHandler *handler = str.handler();
+ if (handler->handler_bci() == -1)
+ break;
+ _exc_handlers->append(handler);
+
+ // Try and get this exception's handler from typeflow. We should
+ // do it this way always, really, except that typeflow sometimes
+ // doesn't record exceptions, even loaded ones, and sometimes it
+ // returns them with a different handler bci. Why???
+ SharkTopLevelBlock *block = NULL;
+ ciInstanceKlass* klass;
+ if (handler->is_catch_all()) {
+ klass = java_lang_Throwable_klass();
+ }
+ else {
+ klass = handler->catch_klass();
+ }
+ for (int i = 0; i < ciblock()->exceptions()->length(); i++) {
+ if (klass == ciblock()->exc_klasses()->at(i)) {
+ block = function()->block(ciblock()->exceptions()->at(i)->pre_order());
+ if (block->start() == handler->handler_bci())
+ break;
+ else
+ block = NULL;
+ }
+ }
+
+ // If typeflow let us down then try and figure it out ourselves
+ if (block == NULL) {
+ for (int i = 0; i < function()->block_count(); i++) {
+ SharkTopLevelBlock *candidate = function()->block(i);
+ if (candidate->start() == handler->handler_bci()) {
+ if (block != NULL) {
+ NOT_PRODUCT(warning("there may be trouble ahead"));
+ block = NULL;
+ break;
+ }
+ block = candidate;
+ }
+ }
+ }
+ _exceptions->append(block);
+ }
+}
+
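+// handle_exception() dispatches a thrown exception to this block's
+// handlers.  If every catch klass is loaded the checks are emitted inline
+// (marshal_exception_fast); otherwise the VM is asked to pick a handler
+// and the result is switched on (marshal_exception_slow).  A catch-all
+// handler, if present, is branched to last, and if nothing matches the
+// frame is unwound via handle_return().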
+void SharkTopLevelBlock::handle_exception(Value* exception, int action) {
+ if (action & EAM_HANDLE && num_exceptions() != 0) {
+ // Clear the stack and push the exception onto it
+ while (xstack_depth())
+ pop();
+ push(SharkValue::create_jobject(exception, true));
+
+ // Work out how many options we have to check
+ bool has_catch_all = exc_handler(num_exceptions() - 1)->is_catch_all();
+ int num_options = num_exceptions();
+ if (has_catch_all)
+ num_options--;
+
+ // Marshal any non-catch-all handlers
+ if (num_options > 0) {
+ bool all_loaded = true;
+ for (int i = 0; i < num_options; i++) {
+ if (!exc_handler(i)->catch_klass()->is_loaded()) {
+ all_loaded = false;
+ break;
+ }
+ }
+
+ if (all_loaded)
+ marshal_exception_fast(num_options);
+ else
+ marshal_exception_slow(num_options);
+ }
+
+ // Install the catch-all handler, if present
+ if (has_catch_all) {
+ SharkTopLevelBlock* handler = this->exception(num_options);
+ assert(handler != NULL, "catch-all handler cannot be unloaded");
+
+ builder()->CreateBr(handler->entry_block());
+ handler->add_incoming(current_state());
+ return;
+ }
+ }
+
+ // No exception handler was found; unwind and return
+ handle_return(T_VOID, exception);
+}
+
+void SharkTopLevelBlock::marshal_exception_fast(int num_options) {
+ Value *exception_klass = builder()->CreateValueOfStructEntry(
+ xstack(0)->jobject_value(),
+ in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ SharkType::oop_type(),
+ "exception_klass");
+
+ for (int i = 0; i < num_options; i++) {
+ Value *check_klass =
+ builder()->CreateInlineOop(exc_handler(i)->catch_klass());
+
+ BasicBlock *not_exact = function()->CreateBlock("not_exact");
+ BasicBlock *not_subtype = function()->CreateBlock("not_subtype");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(check_klass, exception_klass),
+ handler_for_exception(i), not_exact);
+
+ builder()->SetInsertPoint(not_exact);
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(
+ builder()->CreateCall2(
+ builder()->is_subtype_of(), check_klass, exception_klass),
+ LLVMValue::jbyte_constant(0)),
+ handler_for_exception(i), not_subtype);
+
+ builder()->SetInsertPoint(not_subtype);
+ }
+}
+
+void SharkTopLevelBlock::marshal_exception_slow(int num_options) {
+ int *indexes = NEW_RESOURCE_ARRAY(int, num_options);
+ for (int i = 0; i < num_options; i++)
+ indexes[i] = exc_handler(i)->catch_klass_index();
+
+ Value *index = call_vm(
+ builder()->find_exception_handler(),
+ builder()->CreateInlineData(
+ indexes,
+ num_options * sizeof(int),
+ PointerType::getUnqual(SharkType::jint_type())),
+ LLVMValue::jint_constant(num_options),
+ EX_CHECK_NO_CATCH);
+
+ BasicBlock *no_handler = function()->CreateBlock("no_handler");
+ SwitchInst *switchinst = builder()->CreateSwitch(
+ index, no_handler, num_options);
+
+ for (int i = 0; i < num_options; i++) {
+ switchinst->addCase(
+ LLVMValue::jint_constant(i),
+ handler_for_exception(i));
+ }
+
+ builder()->SetInsertPoint(no_handler);
+}
+
+BasicBlock* SharkTopLevelBlock::handler_for_exception(int index) {
+ SharkTopLevelBlock *successor = this->exception(index);
+ if (successor) {
+ successor->add_incoming(current_state());
+ return successor->entry_block();
+ }
+ else {
+ return make_trap(
+ exc_handler(index)->handler_bci(),
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_unhandled,
+ Deoptimization::Action_reinterpret));
+ }
+}
+
+void SharkTopLevelBlock::maybe_add_safepoint() {
+ if (current_state()->has_safepointed())
+ return;
+
+ BasicBlock *orig_block = builder()->GetInsertBlock();
+ SharkState *orig_state = current_state()->copy();
+
+ BasicBlock *do_safepoint = function()->CreateBlock("do_safepoint");
+ BasicBlock *safepointed = function()->CreateBlock("safepointed");
+
+ Value *state = builder()->CreateLoad(
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant(
+ (intptr_t) SafepointSynchronize::address_of_state()),
+ PointerType::getUnqual(SharkType::jint_type())),
+ "state");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(
+ state,
+ LLVMValue::jint_constant(SafepointSynchronize::_synchronizing)),
+ do_safepoint, safepointed);
+
+ builder()->SetInsertPoint(do_safepoint);
+ call_vm(builder()->safepoint(), EX_CHECK_FULL);
+ BasicBlock *safepointed_block = builder()->GetInsertBlock();
+ builder()->CreateBr(safepointed);
+
+ builder()->SetInsertPoint(safepointed);
+ current_state()->merge(orig_state, orig_block, safepointed_block);
+
+ current_state()->set_has_safepointed(true);
+}
+
+void SharkTopLevelBlock::maybe_add_backedge_safepoint() {
+ if (current_state()->has_safepointed())
+ return;
+
+ for (int i = 0; i < num_successors(); i++) {
+ if (successor(i)->can_reach(this)) {
+ maybe_add_safepoint();
+ break;
+ }
+ }
+}
+
+bool SharkTopLevelBlock::can_reach(SharkTopLevelBlock* other) {
+ for (int i = 0; i < function()->block_count(); i++)
+ function()->block(i)->_can_reach_visited = false;
+
+ return can_reach_helper(other);
+}
+
+bool SharkTopLevelBlock::can_reach_helper(SharkTopLevelBlock* other) {
+ if (this == other)
+ return true;
+
+ if (_can_reach_visited)
+ return false;
+ _can_reach_visited = true;
+
+ if (!has_trap()) {
+ for (int i = 0; i < num_successors(); i++) {
+ if (successor(i)->can_reach_helper(other))
+ return true;
+ }
+ }
+
+ for (int i = 0; i < num_exceptions(); i++) {
+ SharkTopLevelBlock *handler = exception(i);
+ if (handler && handler->can_reach_helper(other))
+ return true;
+ }
+
+ return false;
+}
+
+BasicBlock* SharkTopLevelBlock::make_trap(int trap_bci, int trap_request) {
+ BasicBlock *trap_block = function()->CreateBlock("trap");
+ BasicBlock *orig_block = builder()->GetInsertBlock();
+ builder()->SetInsertPoint(trap_block);
+
+ int orig_bci = bci();
+ iter()->force_bci(trap_bci);
+
+ do_trap(trap_request);
+
+ builder()->SetInsertPoint(orig_block);
+ iter()->force_bci(orig_bci);
+
+ return trap_block;
+}
+
+void SharkTopLevelBlock::do_trap(int trap_request) {
+ decache_for_trap();
+ builder()->CreateRet(
+ builder()->CreateCall2(
+ builder()->uncommon_trap(),
+ thread(),
+ LLVMValue::jint_constant(trap_request)));
+}
+
+void SharkTopLevelBlock::call_register_finalizer(Value *receiver) {
+ BasicBlock *orig_block = builder()->GetInsertBlock();
+ SharkState *orig_state = current_state()->copy();
+
+ BasicBlock *do_call = function()->CreateBlock("has_finalizer");
+ BasicBlock *done = function()->CreateBlock("done");
+
+ Value *klass = builder()->CreateValueOfStructEntry(
+ receiver,
+ in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ SharkType::oop_type(),
+ "klass");
+
+ Value *klass_part = builder()->CreateAddressOfStructEntry(
+ klass,
+ in_ByteSize(klassOopDesc::klass_part_offset_in_bytes()),
+ SharkType::klass_type(),
+ "klass_part");
+
+ Value *access_flags = builder()->CreateValueOfStructEntry(
+ klass_part,
+ in_ByteSize(Klass::access_flags_offset_in_bytes()),
+ SharkType::jint_type(),
+ "access_flags");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(
+ builder()->CreateAnd(
+ access_flags,
+ LLVMValue::jint_constant(JVM_ACC_HAS_FINALIZER)),
+ LLVMValue::jint_constant(0)),
+ do_call, done);
+
+ builder()->SetInsertPoint(do_call);
+ call_vm(builder()->register_finalizer(), receiver, EX_CHECK_FULL);
+ BasicBlock *branch_block = builder()->GetInsertBlock();
+ builder()->CreateBr(done);
+
+ builder()->SetInsertPoint(done);
+ current_state()->merge(orig_state, orig_block, branch_block);
+}
+
+void SharkTopLevelBlock::handle_return(BasicType type, Value* exception) {
+ assert (exception == NULL || type == T_VOID, "exception OR result, please");
+
+ if (num_monitors()) {
+ // Protect our exception across possible monitor release decaches
+ if (exception)
+ set_oop_tmp(exception);
+
+ // We don't need to check for exceptions thrown here. If
+ // we're returning a value then we just carry on as normal:
+ // the caller will see the pending exception and handle it.
+ // If we're returning with an exception then that exception
+ // takes priority and the release_lock one will be ignored.
+ while (num_monitors())
+ release_lock(EX_CHECK_NONE);
+
+ // Reload the exception we're throwing
+ if (exception)
+ exception = get_oop_tmp();
+ }
+
+ if (exception) {
+ builder()->CreateStore(exception, pending_exception_address());
+ }
+
+ Value *result_addr = stack()->CreatePopFrame(type2size[type]);
+ if (type != T_VOID) {
+ builder()->CreateStore(
+ pop_result(type)->generic_value(),
+ builder()->CreateIntToPtr(
+ result_addr,
+ PointerType::getUnqual(SharkType::to_stackType(type))));
+ }
+
+ builder()->CreateRet(LLVMValue::jint_constant(0));
+}
+
+void SharkTopLevelBlock::do_arraylength() {
+ SharkValue *array = pop();
+ check_null(array);
+ Value *length = builder()->CreateArrayLength(array->jarray_value());
+ push(SharkValue::create_jint(length, false));
+}
+
+void SharkTopLevelBlock::do_aload(BasicType basic_type) {
+ SharkValue *index = pop();
+ SharkValue *array = pop();
+
+ check_null(array);
+ check_bounds(array, index);
+
+ Value *value = builder()->CreateLoad(
+ builder()->CreateArrayAddress(
+ array->jarray_value(), basic_type, index->jint_value()));
+
+ const Type *stack_type = SharkType::to_stackType(basic_type);
+ if (value->getType() != stack_type)
+ value = builder()->CreateIntCast(value, stack_type, basic_type != T_CHAR);
+
+ switch (basic_type) {
+ case T_BYTE:
+ case T_CHAR:
+ case T_SHORT:
+ case T_INT:
+ push(SharkValue::create_jint(value, false));
+ break;
+
+ case T_LONG:
+ push(SharkValue::create_jlong(value, false));
+ break;
+
+ case T_FLOAT:
+ push(SharkValue::create_jfloat(value));
+ break;
+
+ case T_DOUBLE:
+ push(SharkValue::create_jdouble(value));
+ break;
+
+ case T_OBJECT:
+ // You might expect that array->type()->is_array_klass() would
+ // always be true, but it isn't. If ciTypeFlow detects that a
+ // value is always null then that value becomes an untyped null
+ // object. Shark doesn't presently support this, so a generic
+ // T_OBJECT is created. In this case we guess the type using
+ // the BasicType we were supplied. In reality the generated
+ // code will never be used, as the null value will be caught
+ // by the above null pointer check.
+ // http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=324
+ push(
+ SharkValue::create_generic(
+ array->type()->is_array_klass() ?
+ ((ciArrayKlass *) array->type())->element_type() :
+ ciType::make(basic_type),
+ value, false));
+ break;
+
+ default:
+ tty->print_cr("Unhandled type %s", type2name(basic_type));
+ ShouldNotReachHere();
+ }
+}
+
+void SharkTopLevelBlock::do_astore(BasicType basic_type) {
+ SharkValue *svalue = pop();
+ SharkValue *index = pop();
+ SharkValue *array = pop();
+
+ check_null(array);
+ check_bounds(array, index);
+
+ Value *value;
+ switch (basic_type) {
+ case T_BYTE:
+ case T_CHAR:
+ case T_SHORT:
+ case T_INT:
+ value = svalue->jint_value();
+ break;
+
+ case T_LONG:
+ value = svalue->jlong_value();
+ break;
+
+ case T_FLOAT:
+ value = svalue->jfloat_value();
+ break;
+
+ case T_DOUBLE:
+ value = svalue->jdouble_value();
+ break;
+
+ case T_OBJECT:
+ value = svalue->jobject_value();
+ // XXX assignability check
+ break;
+
+ default:
+ tty->print_cr("Unhandled type %s", type2name(basic_type));
+ ShouldNotReachHere();
+ }
+
+ const Type *array_type = SharkType::to_arrayType(basic_type);
+ if (value->getType() != array_type)
+ value = builder()->CreateIntCast(value, array_type, basic_type != T_CHAR);
+
+ Value *addr = builder()->CreateArrayAddress(
+ array->jarray_value(), basic_type, index->jint_value(), "addr");
+
+ builder()->CreateStore(value, addr);
+
+ if (basic_type == T_OBJECT) // XXX or T_ARRAY?
+ builder()->CreateUpdateBarrierSet(oopDesc::bs(), addr);
+}
+
+void SharkTopLevelBlock::do_return(BasicType type) {
+ if (target()->intrinsic_id() == vmIntrinsics::_Object_init)
+ call_register_finalizer(local(0)->jobject_value());
+ maybe_add_safepoint();
+ handle_return(type, NULL);
+}
+
+void SharkTopLevelBlock::do_athrow() {
+ SharkValue *exception = pop();
+ check_null(exception);
+ handle_exception(exception->jobject_value(), EX_CHECK_FULL);
+}
+
+void SharkTopLevelBlock::do_goto() {
+ do_branch(ciTypeFlow::GOTO_TARGET);
+}
+
+void SharkTopLevelBlock::do_jsr() {
+ push(SharkValue::address_constant(iter()->next_bci()));
+ do_branch(ciTypeFlow::GOTO_TARGET);
+}
+
+void SharkTopLevelBlock::do_ret() {
+ assert(local(iter()->get_index())->address_value() ==
+ successor(ciTypeFlow::GOTO_TARGET)->start(), "should be");
+ do_branch(ciTypeFlow::GOTO_TARGET);
+}
+
+// All propagation of state from one block to the next (via
+// dest->add_incoming) is handled by these methods:
+// do_branch
+// do_if_helper
+// do_switch
+// handle_exception
+
+void SharkTopLevelBlock::do_branch(int successor_index) {
+ SharkTopLevelBlock *dest = successor(successor_index);
+ builder()->CreateBr(dest->entry_block());
+ dest->add_incoming(current_state());
+}
+
+void SharkTopLevelBlock::do_if(ICmpInst::Predicate p,
+ SharkValue* b,
+ SharkValue* a) {
+ Value *llvm_a, *llvm_b;
+ if (a->is_jobject()) {
+ llvm_a = a->intptr_value(builder());
+ llvm_b = b->intptr_value(builder());
+ }
+ else {
+ llvm_a = a->jint_value();
+ llvm_b = b->jint_value();
+ }
+ do_if_helper(p, llvm_b, llvm_a, current_state(), current_state());
+}
+
+void SharkTopLevelBlock::do_if_helper(ICmpInst::Predicate p,
+ Value* b,
+ Value* a,
+ SharkState* if_taken_state,
+ SharkState* not_taken_state) {
+ SharkTopLevelBlock *if_taken = successor(ciTypeFlow::IF_TAKEN);
+ SharkTopLevelBlock *not_taken = successor(ciTypeFlow::IF_NOT_TAKEN);
+
+ builder()->CreateCondBr(
+ builder()->CreateICmp(p, a, b),
+ if_taken->entry_block(), not_taken->entry_block());
+
+ if_taken->add_incoming(if_taken_state);
+ not_taken->add_incoming(not_taken_state);
+}
+
+void SharkTopLevelBlock::do_switch() {
+ int len = switch_table_length();
+
+ SharkTopLevelBlock *dest_block = successor(ciTypeFlow::SWITCH_DEFAULT);
+ SwitchInst *switchinst = builder()->CreateSwitch(
+ pop()->jint_value(), dest_block->entry_block(), len);
+ dest_block->add_incoming(current_state());
+
+ for (int i = 0; i < len; i++) {
+ int dest_bci = switch_dest(i);
+ if (dest_bci != switch_default_dest()) {
+ dest_block = bci_successor(dest_bci);
+ switchinst->addCase(
+ LLVMValue::jint_constant(switch_key(i)),
+ dest_block->entry_block());
+ dest_block->add_incoming(current_state());
+ }
+ }
+}
+
+ciMethod* SharkTopLevelBlock::improve_virtual_call(ciMethod* caller,
+ ciInstanceKlass* klass,
+ ciMethod* dest_method,
+ ciType* receiver_type) {
+ // If the method is obviously final then we are already done
+ if (dest_method->can_be_statically_bound())
+ return dest_method;
+
+ // Array methods are all inherited from Object and are monomorphic
+ if (receiver_type->is_array_klass() &&
+ dest_method->holder() == java_lang_Object_klass())
+ return dest_method;
+
+#ifdef SHARK_CAN_DEOPTIMIZE_ANYWHERE
+ // This code can replace a virtual call with a direct call if this
+ // class is the only one in the entire set of loaded classes that
+ // implements this method. This makes the compiled code dependent
+ // on other classes that implement the method not being loaded, a
+ // condition which is enforced by the dependency tracker. If the
+ // dependency tracker determines a method has become invalid it
+ // will mark it for recompilation, causing running copies to be
+ // deoptimized. Shark currently can't deoptimize arbitrarily like
+ // that, so this optimization cannot be used.
+ // http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=481
+
+ // All other interesting cases are instance classes
+ if (!receiver_type->is_instance_klass())
+ return NULL;
+
+ // Attempt to improve the receiver
+ ciInstanceKlass* actual_receiver = klass;
+ ciInstanceKlass *improved_receiver = receiver_type->as_instance_klass();
+ if (improved_receiver->is_loaded() &&
+ improved_receiver->is_initialized() &&
+ !improved_receiver->is_interface() &&
+ improved_receiver->is_subtype_of(actual_receiver)) {
+ actual_receiver = improved_receiver;
+ }
+
+ // Attempt to find a monomorphic target for this call using
+  // class hierarchy analysis.
+ ciInstanceKlass *calling_klass = caller->holder();
+ ciMethod* monomorphic_target =
+ dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
+ if (monomorphic_target != NULL) {
+ assert(!monomorphic_target->is_abstract(), "shouldn't be");
+
+ // Opto has a bunch of type checking here that I don't
+ // understand. It's to inhibit casting in one direction,
+ // possibly because objects in Opto can have inexact
+ // types, but I can't even tell which direction it
+ // doesn't like. For now I'm going to block *any* cast.
+ if (monomorphic_target != dest_method) {
+ if (SharkPerformanceWarnings) {
+ warning("found monomorphic target, but inhibited cast:");
+ tty->print(" dest_method = ");
+ dest_method->print_short_name(tty);
+ tty->cr();
+ tty->print(" monomorphic_target = ");
+ monomorphic_target->print_short_name(tty);
+ tty->cr();
+ }
+ monomorphic_target = NULL;
+ }
+ }
+
+ // Replace the virtual call with a direct one. This makes
+ // us dependent on that target method not getting overridden
+ // by dynamic class loading.
+ if (monomorphic_target != NULL) {
+ dependencies()->assert_unique_concrete_method(
+ actual_receiver, monomorphic_target);
+ return monomorphic_target;
+ }
+
+ // Because Opto distinguishes exact types from inexact ones
+ // it can perform a further optimization to replace calls
+ // with non-monomorphic targets if the receiver has an exact
+ // type. We don't mark types this way, so we can't do this.
+
+#endif // SHARK_CAN_DEOPTIMIZE_ANYWHERE
+
+ return NULL;
+}
+
+Value *SharkTopLevelBlock::get_direct_callee(ciMethod* method) {
+ return builder()->CreateBitCast(
+ builder()->CreateInlineOop(method),
+ SharkType::methodOop_type(),
+ "callee");
+}
+
+Value *SharkTopLevelBlock::get_virtual_callee(SharkValue* receiver,
+ int vtable_index) {
+ Value *klass = builder()->CreateValueOfStructEntry(
+ receiver->jobject_value(),
+ in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ SharkType::oop_type(),
+ "klass");
+
+ return builder()->CreateLoad(
+ builder()->CreateArrayAddress(
+ klass,
+ SharkType::methodOop_type(),
+ vtableEntry::size() * wordSize,
+ in_ByteSize(instanceKlass::vtable_start_offset() * wordSize),
+ LLVMValue::intptr_constant(vtable_index)),
+ "callee");
+}
+
+Value* SharkTopLevelBlock::get_interface_callee(SharkValue *receiver,
+ ciMethod* method) {
+ BasicBlock *loop = function()->CreateBlock("loop");
+ BasicBlock *got_null = function()->CreateBlock("got_null");
+ BasicBlock *not_null = function()->CreateBlock("not_null");
+ BasicBlock *next = function()->CreateBlock("next");
+ BasicBlock *got_entry = function()->CreateBlock("got_entry");
+
+ // Locate the receiver's itable
+ Value *object_klass = builder()->CreateValueOfStructEntry(
+ receiver->jobject_value(), in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ SharkType::oop_type(),
+ "object_klass");
+
+ Value *vtable_start = builder()->CreateAdd(
+ builder()->CreatePtrToInt(object_klass, SharkType::intptr_type()),
+ LLVMValue::intptr_constant(
+ instanceKlass::vtable_start_offset() * HeapWordSize),
+ "vtable_start");
+
+ Value *vtable_length = builder()->CreateValueOfStructEntry(
+ object_klass,
+ in_ByteSize(instanceKlass::vtable_length_offset() * HeapWordSize),
+ SharkType::jint_type(),
+ "vtable_length");
+ vtable_length =
+ builder()->CreateIntCast(vtable_length, SharkType::intptr_type(), false);
+
+ bool needs_aligning = HeapWordsPerLong > 1;
+ Value *itable_start = builder()->CreateAdd(
+ vtable_start,
+ builder()->CreateShl(
+ vtable_length,
+ LLVMValue::intptr_constant(exact_log2(vtableEntry::size() * wordSize))),
+ needs_aligning ? "" : "itable_start");
+ if (needs_aligning) {
+ itable_start = builder()->CreateAnd(
+ builder()->CreateAdd(
+ itable_start, LLVMValue::intptr_constant(BytesPerLong - 1)),
+ LLVMValue::intptr_constant(~(BytesPerLong - 1)),
+ "itable_start");
+ }
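+  // The address arithmetic above assumes the receiver's klass is laid out
+  // as: vtable_start_offset() words of header, then vtable_length entries
+  // of vtableEntry::size() words, then (aligned to BytesPerLong when
+  // HeapWordsPerLong > 1) the itable, whose itableOffsetEntry records are
+  // walked by the loop below.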
+
+ // Locate this interface's entry in the table
+ Value *iklass = builder()->CreateInlineOop(method->holder());
+ BasicBlock *loop_entry = builder()->GetInsertBlock();
+ builder()->CreateBr(loop);
+ builder()->SetInsertPoint(loop);
+ PHINode *itable_entry_addr = builder()->CreatePHI(
+ SharkType::intptr_type(), "itable_entry_addr");
+ itable_entry_addr->addIncoming(itable_start, loop_entry);
+
+ Value *itable_entry = builder()->CreateIntToPtr(
+ itable_entry_addr, SharkType::itableOffsetEntry_type(), "itable_entry");
+
+ Value *itable_iklass = builder()->CreateValueOfStructEntry(
+ itable_entry,
+ in_ByteSize(itableOffsetEntry::interface_offset_in_bytes()),
+ SharkType::oop_type(),
+ "itable_iklass");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(itable_iklass, LLVMValue::null()),
+ got_null, not_null);
+
+ // A null entry means that the class doesn't implement the
+ // interface, and wasn't the same as the class checked when
+ // the interface was resolved.
+ builder()->SetInsertPoint(got_null);
+ builder()->CreateUnimplemented(__FILE__, __LINE__);
+ builder()->CreateUnreachable();
+
+ builder()->SetInsertPoint(not_null);
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(itable_iklass, iklass),
+ got_entry, next);
+
+ builder()->SetInsertPoint(next);
+ Value *next_entry = builder()->CreateAdd(
+ itable_entry_addr,
+ LLVMValue::intptr_constant(itableOffsetEntry::size() * wordSize));
+ builder()->CreateBr(loop);
+ itable_entry_addr->addIncoming(next_entry, next);
+
+ // Locate the method pointer
+ builder()->SetInsertPoint(got_entry);
+ Value *offset = builder()->CreateValueOfStructEntry(
+ itable_entry,
+ in_ByteSize(itableOffsetEntry::offset_offset_in_bytes()),
+ SharkType::jint_type(),
+ "offset");
+ offset =
+ builder()->CreateIntCast(offset, SharkType::intptr_type(), false);
+
+ return builder()->CreateLoad(
+ builder()->CreateIntToPtr(
+ builder()->CreateAdd(
+ builder()->CreateAdd(
+ builder()->CreateAdd(
+ builder()->CreatePtrToInt(
+ object_klass, SharkType::intptr_type()),
+ offset),
+ LLVMValue::intptr_constant(
+ method->itable_index() * itableMethodEntry::size() * wordSize)),
+ LLVMValue::intptr_constant(
+ itableMethodEntry::method_offset_in_bytes())),
+ PointerType::getUnqual(SharkType::methodOop_type())),
+ "callee");
+}
+
+void SharkTopLevelBlock::do_call() {
+ // Set frequently used booleans
+ bool is_static = bc() == Bytecodes::_invokestatic;
+ bool is_virtual = bc() == Bytecodes::_invokevirtual;
+ bool is_interface = bc() == Bytecodes::_invokeinterface;
+
+ // Find the method being called
+ bool will_link;
+ ciMethod *dest_method = iter()->get_method(will_link);
+ assert(will_link, "typeflow responsibility");
+ assert(dest_method->is_static() == is_static, "must match bc");
+
+ // Find the class of the method being called. Note
+ // that the superclass check in the second assertion
+ // is to cope with a hole in the spec that allows for
+ // invokeinterface instructions where the resolved
+ // method is a virtual method in java.lang.Object.
+ // javac doesn't generate code like that, but there's
+ // no reason a compliant Java compiler might not.
+ ciInstanceKlass *holder_klass = dest_method->holder();
+ assert(holder_klass->is_loaded(), "scan_for_traps responsibility");
+ assert(holder_klass->is_interface() ||
+ holder_klass->super() == NULL ||
+ !is_interface, "must match bc");
+ ciKlass *holder = iter()->get_declared_method_holder();
+ ciInstanceKlass *klass =
+ ciEnv::get_instance_klass_for_declared_method_holder(holder);
+
+  // Find the receiver in the stack. We do this before
+  // trying to inline because the inliner can only use
+  // zero-checked values; it cannot perform the check
+  // itself.
+ SharkValue *receiver = NULL;
+ if (!is_static) {
+ receiver = xstack(dest_method->arg_size() - 1);
+ check_null(receiver);
+ }
+
+ // Try to improve non-direct calls
+ bool call_is_virtual = is_virtual || is_interface;
+ ciMethod *call_method = dest_method;
+ if (call_is_virtual) {
+ ciMethod *optimized_method = improve_virtual_call(
+ target(), klass, dest_method, receiver->type());
+ if (optimized_method) {
+ call_method = optimized_method;
+ call_is_virtual = false;
+ }
+ }
+
+ // Try to inline the call
+ if (!call_is_virtual) {
+ if (SharkInliner::attempt_inline(call_method, current_state()))
+ return;
+ }
+
+ // Find the method we are calling
+ Value *callee;
+ if (call_is_virtual) {
+ if (is_virtual) {
+ assert(klass->is_linked(), "scan_for_traps responsibility");
+ int vtable_index = call_method->resolve_vtable_index(
+ target()->holder(), klass);
+ assert(vtable_index >= 0, "should be");
+ callee = get_virtual_callee(receiver, vtable_index);
+ }
+ else {
+ assert(is_interface, "should be");
+ callee = get_interface_callee(receiver, call_method);
+ }
+ }
+ else {
+ callee = get_direct_callee(call_method);
+ }
+
+ // Load the SharkEntry from the callee
+ Value *base_pc = builder()->CreateValueOfStructEntry(
+ callee, methodOopDesc::from_interpreted_offset(),
+ SharkType::intptr_type(),
+ "base_pc");
+
+ // Load the entry point from the SharkEntry
+ Value *entry_point = builder()->CreateLoad(
+ builder()->CreateIntToPtr(
+ builder()->CreateAdd(
+ base_pc,
+ LLVMValue::intptr_constant(in_bytes(ZeroEntry::entry_point_offset()))),
+ PointerType::getUnqual(
+ PointerType::getUnqual(SharkType::entry_point_type()))),
+ "entry_point");
+
+ // Make the call
+ decache_for_Java_call(call_method);
+ Value *deoptimized_frames = builder()->CreateCall3(
+ entry_point, callee, base_pc, thread());
+
+ // If the callee got deoptimized then reexecute in the interpreter
+ BasicBlock *reexecute = function()->CreateBlock("reexecute");
+ BasicBlock *call_completed = function()->CreateBlock("call_completed");
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(deoptimized_frames, LLVMValue::jint_constant(0)),
+ reexecute, call_completed);
+
+ builder()->SetInsertPoint(reexecute);
+ builder()->CreateCall2(
+ builder()->deoptimized_entry_point(),
+ builder()->CreateSub(deoptimized_frames, LLVMValue::jint_constant(1)),
+ thread());
+ builder()->CreateBr(call_completed);
+
+ // Cache after the call
+ builder()->SetInsertPoint(call_completed);
+ cache_after_Java_call(call_method);
+
+ // Check for pending exceptions
+ check_pending_exception(EX_CHECK_FULL);
+
+ // Mark that a safepoint check has occurred
+ current_state()->set_has_safepointed(true);
+}
+
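The call sequence here is Shark's convention for calling other Zero/Shark methods: load base_pc (the SharkEntry) from the callee methodOop's from_interpreted field, load the entry point out of the ZeroEntry, and call it with the callee, base_pc and the current thread. The return value counts the frames deoptimized during the call; when it is nonzero the remaining frames are re-executed in the interpreter before control reaches call_completed. A rough standalone sketch of that shape, with hypothetical stand-ins (entry_point_t, FakeMethod) for the real VM types:

    #include <cstdio>

    // Hypothetical stand-ins: a compiled callee exposes an entry point that
    // returns how many frames were deoptimized during the call (0 = none).
    typedef int (*entry_point_t)(void* callee, void* thread);

    struct FakeMethod { entry_point_t entry_point; };

    static int plain_callee(void*, void*)        { return 0; }  // normal return
    static int deoptimizing_callee(void*, void*) { return 2; }  // frames to redo

    static void reexecute_in_interpreter(int frames, void* /*thread*/) {
      std::printf("re-executing %d deoptimized frame(s) in the interpreter\n",
                  frames);
    }

    // Mirrors the shape of the generated code: make the call, then branch on
    // the deoptimized-frame count before falling through to 'call_completed'.
    static void invoke(FakeMethod* m, void* thread) {
      int deoptimized_frames = m->entry_point(m, thread);
      if (deoptimized_frames != 0)
        reexecute_in_interpreter(deoptimized_frames - 1, thread);
      // ... cache_after_Java_call, check_pending_exception ...
    }

    int main() {
      FakeMethod a = { plain_callee };
      FakeMethod b = { deoptimizing_callee };
      invoke(&a, NULL);
      invoke(&b, NULL);
      return 0;
    }
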
+bool SharkTopLevelBlock::static_subtype_check(ciKlass* check_klass,
+ ciKlass* object_klass) {
+ // If the class we're checking against is java.lang.Object
+  // then this is a no-brainer. Apparently this can happen
+  // in reflective code...
+ if (check_klass == java_lang_Object_klass())
+ return true;
+
+ // Perform a subtype check. NB in opto's code for this
+ // (GraphKit::static_subtype_check) it says that static
+ // interface types cannot be trusted, and if opto can't
+ // trust them then I assume we can't either.
+ if (object_klass->is_loaded() && !object_klass->is_interface()) {
+ if (object_klass == check_klass)
+ return true;
+
+ if (check_klass->is_loaded() && object_klass->is_subtype_of(check_klass))
+ return true;
+ }
+
+ return false;
+}
+
+void SharkTopLevelBlock::do_instance_check() {
+ // Get the class we're checking against
+ bool will_link;
+ ciKlass *check_klass = iter()->get_klass(will_link);
+
+ // Get the class of the object we're checking
+ ciKlass *object_klass = xstack(0)->type()->as_klass();
+
+ // Can we optimize this check away?
+ if (static_subtype_check(check_klass, object_klass)) {
+ if (bc() == Bytecodes::_instanceof) {
+ pop();
+ push(SharkValue::jint_constant(1));
+ }
+ return;
+ }
+
+ // Need to check this one at runtime
+ if (will_link)
+ do_full_instance_check(check_klass);
+ else
+ do_trapping_instance_check(check_klass);
+}
+
+bool SharkTopLevelBlock::maybe_do_instanceof_if() {
+ // Get the class we're checking against
+ bool will_link;
+ ciKlass *check_klass = iter()->get_klass(will_link);
+
+ // If the class is unloaded then the instanceof
+ // cannot possibly succeed.
+ if (!will_link)
+ return false;
+
+ // Keep a copy of the object we're checking
+ SharkValue *old_object = xstack(0);
+
+ // Get the class of the object we're checking
+ ciKlass *object_klass = old_object->type()->as_klass();
+
+ // If the instanceof can be optimized away at compile time
+  // then any subsequent checkcasts will be too, so we handle
+ // it normally.
+ if (static_subtype_check(check_klass, object_klass))
+ return false;
+
+ // Perform the instance check
+ do_full_instance_check(check_klass);
+ Value *result = pop()->jint_value();
+
+ // Create the casted object
+ SharkValue *new_object = SharkValue::create_generic(
+ check_klass, old_object->jobject_value(), old_object->zero_checked());
+
+ // Create two copies of the current state, one with the
+ // original object and one with all instances of the
+ // original object replaced with the new, casted object.
+ SharkState *new_state = current_state();
+ SharkState *old_state = new_state->copy();
+ new_state->replace_all(old_object, new_object);
+
+ // Perform the check-and-branch
+ switch (iter()->next_bc()) {
+ case Bytecodes::_ifeq:
+ // branch if not an instance
+ do_if_helper(
+ ICmpInst::ICMP_EQ,
+ LLVMValue::jint_constant(0), result,
+ old_state, new_state);
+ break;
+
+ case Bytecodes::_ifne:
+ // branch if an instance
+ do_if_helper(
+ ICmpInst::ICMP_NE,
+ LLVMValue::jint_constant(0), result,
+ new_state, old_state);
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ return true;
+}
+
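The trick above targets the common instanceof-then-branch bytecode pattern: the check is performed once, and two copies of the abstract state are kept (the original, and one in which every occurrence of the object is replaced by a value retyped to the checked class), with each copy wired to the branch on which it is valid. A tiny illustration of that state forking, using an ordinary map as a stand-in for SharkState:

    #include <cstdio>
    #include <map>
    #include <string>

    // Hypothetical abstract state: variable name -> static type.
    typedef std::map<std::string, std::string> State;

    // After "if (o instanceof T)" the taken branch may assume o : T, while
    // the other branch keeps the original type, so the state is forked.
    static void fork_on_instanceof(const State& in, const std::string& var,
                                   const std::string& checked_type,
                                   State* taken, State* not_taken) {
      *taken = in;
      *not_taken = in;
      (*taken)[var] = checked_type;   // narrowed copy for the passing branch
    }

    int main() {
      State in;
      in["o"] = "java.lang.Object";
      State taken, not_taken;
      fork_on_instanceof(in, "o", "java.lang.String", &taken, &not_taken);
      std::printf("taken: o is %s; not taken: o is %s\n",
                  taken["o"].c_str(), not_taken["o"].c_str());
      return 0;
    }
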
+void SharkTopLevelBlock::do_full_instance_check(ciKlass* klass) {
+ BasicBlock *not_null = function()->CreateBlock("not_null");
+ BasicBlock *subtype_check = function()->CreateBlock("subtype_check");
+ BasicBlock *is_instance = function()->CreateBlock("is_instance");
+ BasicBlock *not_instance = function()->CreateBlock("not_instance");
+ BasicBlock *merge1 = function()->CreateBlock("merge1");
+ BasicBlock *merge2 = function()->CreateBlock("merge2");
+
+ enum InstanceCheckStates {
+ IC_IS_NULL,
+ IC_IS_INSTANCE,
+ IC_NOT_INSTANCE,
+ };
+
+ // Pop the object off the stack
+ Value *object = pop()->jobject_value();
+
+ // Null objects aren't instances of anything
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(object, LLVMValue::null()),
+ merge2, not_null);
+ BasicBlock *null_block = builder()->GetInsertBlock();
+
+ // Get the class we're checking against
+ builder()->SetInsertPoint(not_null);
+ Value *check_klass = builder()->CreateInlineOop(klass);
+
+ // Get the class of the object being tested
+ Value *object_klass = builder()->CreateValueOfStructEntry(
+ object, in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ SharkType::oop_type(),
+ "object_klass");
+
+ // Perform the check
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(check_klass, object_klass),
+ is_instance, subtype_check);
+
+ builder()->SetInsertPoint(subtype_check);
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(
+ builder()->CreateCall2(
+ builder()->is_subtype_of(), check_klass, object_klass),
+ LLVMValue::jbyte_constant(0)),
+ is_instance, not_instance);
+
+ builder()->SetInsertPoint(is_instance);
+ builder()->CreateBr(merge1);
+
+ builder()->SetInsertPoint(not_instance);
+ builder()->CreateBr(merge1);
+
+ // First merge
+ builder()->SetInsertPoint(merge1);
+ PHINode *nonnull_result = builder()->CreatePHI(
+ SharkType::jint_type(), "nonnull_result");
+ nonnull_result->addIncoming(
+ LLVMValue::jint_constant(IC_IS_INSTANCE), is_instance);
+ nonnull_result->addIncoming(
+ LLVMValue::jint_constant(IC_NOT_INSTANCE), not_instance);
+ BasicBlock *nonnull_block = builder()->GetInsertBlock();
+ builder()->CreateBr(merge2);
+
+ // Second merge
+ builder()->SetInsertPoint(merge2);
+ PHINode *result = builder()->CreatePHI(
+ SharkType::jint_type(), "result");
+ result->addIncoming(LLVMValue::jint_constant(IC_IS_NULL), null_block);
+ result->addIncoming(nonnull_result, nonnull_block);
+
+ // Handle the result
+ if (bc() == Bytecodes::_checkcast) {
+ BasicBlock *failure = function()->CreateBlock("failure");
+ BasicBlock *success = function()->CreateBlock("success");
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpNE(
+ result, LLVMValue::jint_constant(IC_NOT_INSTANCE)),
+ success, failure);
+
+ builder()->SetInsertPoint(failure);
+ SharkState *saved_state = current_state()->copy();
+
+ call_vm(
+ builder()->throw_ClassCastException(),
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) __FILE__),
+ PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jint_constant(__LINE__),
+ EX_CHECK_NONE);
+
+ Value *pending_exception = get_pending_exception();
+ clear_pending_exception();
+ handle_exception(pending_exception, EX_CHECK_FULL);
+
+ set_current_state(saved_state);
+ builder()->SetInsertPoint(success);
+ push(SharkValue::create_generic(klass, object, false));
+ }
+ else {
+ push(
+ SharkValue::create_jint(
+ builder()->CreateIntCast(
+ builder()->CreateICmpEQ(
+ result, LLVMValue::jint_constant(IC_IS_INSTANCE)),
+ SharkType::jint_type(), false), false));
+ }
+}
+
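Stripped of the control flow, do_full_instance_check computes a three-way answer (null, instance, or not an instance) and then handles it per bytecode: instanceof pushes 1 only in the instance case, while checkcast throws only in the not-an-instance case; a null reference passes a checkcast untouched. A small sketch of that decision logic, with the klass compare and the is_subtype_of call reduced to a hypothetical boolean:

    #include <cstdio>

    enum InstanceCheckState { IC_IS_NULL, IC_IS_INSTANCE, IC_NOT_INSTANCE };

    // Hypothetical classifier standing in for the klass compare and the
    // is_subtype_of runtime call.
    static InstanceCheckState classify(const void* obj, bool is_subtype) {
      if (obj == NULL) return IC_IS_NULL;
      return is_subtype ? IC_IS_INSTANCE : IC_NOT_INSTANCE;
    }

    // instanceof pushes 1 only for IC_IS_INSTANCE; checkcast throws only for
    // IC_NOT_INSTANCE, so a null reference passes the cast untouched.
    static int do_instanceof(const void* obj, bool is_subtype) {
      return classify(obj, is_subtype) == IC_IS_INSTANCE ? 1 : 0;
    }
    static bool checkcast_ok(const void* obj, bool is_subtype) {
      return classify(obj, is_subtype) != IC_NOT_INSTANCE;
    }

    int main() {
      int x = 0;
      std::printf("instanceof(null): %d  checkcast(null) ok: %d\n",
                  do_instanceof(NULL, true), (int) checkcast_ok(NULL, true));
      std::printf("instanceof(obj): %d   checkcast(bad obj) ok: %d\n",
                  do_instanceof(&x, true), (int) checkcast_ok(&x, false));
      return 0;
    }
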
+void SharkTopLevelBlock::do_trapping_instance_check(ciKlass* klass) {
+ BasicBlock *not_null = function()->CreateBlock("not_null");
+ BasicBlock *is_null = function()->CreateBlock("null");
+
+ // Leave the object on the stack so it's there if we trap
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(xstack(0)->jobject_value(), LLVMValue::null()),
+ is_null, not_null);
+ SharkState *saved_state = current_state()->copy();
+
+ // If it's not null then we need to trap
+ builder()->SetInsertPoint(not_null);
+ set_current_state(saved_state->copy());
+ do_trap(
+ Deoptimization::make_trap_request(
+ Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret));
+
+ // If it's null then we're ok
+ builder()->SetInsertPoint(is_null);
+ set_current_state(saved_state);
+ if (bc() == Bytecodes::_checkcast) {
+ push(SharkValue::create_generic(klass, pop()->jobject_value(), false));
+ }
+ else {
+ pop();
+ push(SharkValue::jint_constant(0));
+ }
+}
+
+void SharkTopLevelBlock::do_new() {
+ bool will_link;
+ ciInstanceKlass* klass = iter()->get_klass(will_link)->as_instance_klass();
+ assert(will_link, "typeflow responsibility");
+
+ BasicBlock *got_tlab = NULL;
+ BasicBlock *heap_alloc = NULL;
+ BasicBlock *retry = NULL;
+ BasicBlock *got_heap = NULL;
+ BasicBlock *initialize = NULL;
+ BasicBlock *got_fast = NULL;
+ BasicBlock *slow_alloc_and_init = NULL;
+ BasicBlock *got_slow = NULL;
+ BasicBlock *push_object = NULL;
+
+ SharkState *fast_state = NULL;
+
+ Value *tlab_object = NULL;
+ Value *heap_object = NULL;
+ Value *fast_object = NULL;
+ Value *slow_object = NULL;
+ Value *object = NULL;
+
+ // The fast path
+ if (!Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
+ if (UseTLAB) {
+ got_tlab = function()->CreateBlock("got_tlab");
+ heap_alloc = function()->CreateBlock("heap_alloc");
+ }
+ retry = function()->CreateBlock("retry");
+ got_heap = function()->CreateBlock("got_heap");
+ initialize = function()->CreateBlock("initialize");
+ slow_alloc_and_init = function()->CreateBlock("slow_alloc_and_init");
+ push_object = function()->CreateBlock("push_object");
+
+ size_t size_in_bytes = klass->size_helper() << LogHeapWordSize;
+
+ // Thread local allocation
+ if (UseTLAB) {
+ Value *top_addr = builder()->CreateAddressOfStructEntry(
+ thread(), Thread::tlab_top_offset(),
+ PointerType::getUnqual(SharkType::intptr_type()),
+ "top_addr");
+
+ Value *end = builder()->CreateValueOfStructEntry(
+ thread(), Thread::tlab_end_offset(),
+ SharkType::intptr_type(),
+ "end");
+
+ Value *old_top = builder()->CreateLoad(top_addr, "old_top");
+ Value *new_top = builder()->CreateAdd(
+ old_top, LLVMValue::intptr_constant(size_in_bytes));
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpULE(new_top, end),
+ got_tlab, heap_alloc);
+
+ builder()->SetInsertPoint(got_tlab);
+ tlab_object = builder()->CreateIntToPtr(
+ old_top, SharkType::oop_type(), "tlab_object");
+
+ builder()->CreateStore(new_top, top_addr);
+ builder()->CreateBr(initialize);
+
+ builder()->SetInsertPoint(heap_alloc);
+ }
+
+ // Heap allocation
+ Value *top_addr = builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) Universe::heap()->top_addr()),
+ PointerType::getUnqual(SharkType::intptr_type()),
+ "top_addr");
+
+ Value *end = builder()->CreateLoad(
+ builder()->CreateIntToPtr(
+ LLVMValue::intptr_constant((intptr_t) Universe::heap()->end_addr()),
+ PointerType::getUnqual(SharkType::intptr_type())),
+ "end");
+
+ builder()->CreateBr(retry);
+ builder()->SetInsertPoint(retry);
+
+ Value *old_top = builder()->CreateLoad(top_addr, "top");
+ Value *new_top = builder()->CreateAdd(
+ old_top, LLVMValue::intptr_constant(size_in_bytes));
+
+ builder()->CreateCondBr(
+ builder()->CreateICmpULE(new_top, end),
+ got_heap, slow_alloc_and_init);
+
+ builder()->SetInsertPoint(got_heap);
+ heap_object = builder()->CreateIntToPtr(
+ old_top, SharkType::oop_type(), "heap_object");
+
+ Value *check = builder()->CreateCmpxchgPtr(new_top, top_addr, old_top);
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(old_top, check),
+ initialize, retry);
+
+ // Initialize the object
+ builder()->SetInsertPoint(initialize);
+ if (tlab_object) {
+ PHINode *phi = builder()->CreatePHI(
+ SharkType::oop_type(), "fast_object");
+ phi->addIncoming(tlab_object, got_tlab);
+ phi->addIncoming(heap_object, got_heap);
+ fast_object = phi;
+ }
+ else {
+ fast_object = heap_object;
+ }
+
+ builder()->CreateMemset(
+ builder()->CreateBitCast(
+ fast_object, PointerType::getUnqual(SharkType::jbyte_type())),
+ LLVMValue::jbyte_constant(0),
+ LLVMValue::jint_constant(size_in_bytes),
+ LLVMValue::jint_constant(HeapWordSize));
+
+ Value *mark_addr = builder()->CreateAddressOfStructEntry(
+ fast_object, in_ByteSize(oopDesc::mark_offset_in_bytes()),
+ PointerType::getUnqual(SharkType::intptr_type()),
+ "mark_addr");
+
+ Value *klass_addr = builder()->CreateAddressOfStructEntry(
+ fast_object, in_ByteSize(oopDesc::klass_offset_in_bytes()),
+ PointerType::getUnqual(SharkType::oop_type()),
+ "klass_addr");
+
+ // Set the mark
+ intptr_t mark;
+ if (UseBiasedLocking) {
+ Unimplemented();
+ }
+ else {
+ mark = (intptr_t) markOopDesc::prototype();
+ }
+ builder()->CreateStore(LLVMValue::intptr_constant(mark), mark_addr);
+
+ // Set the class
+ Value *rtklass = builder()->CreateInlineOop(klass);
+ builder()->CreateStore(rtklass, klass_addr);
+ got_fast = builder()->GetInsertBlock();
+
+ builder()->CreateBr(push_object);
+ builder()->SetInsertPoint(slow_alloc_and_init);
+ fast_state = current_state()->copy();
+ }
+
+ // The slow path
+ call_vm(
+ builder()->new_instance(),
+ LLVMValue::jint_constant(iter()->get_klass_index()),
+ EX_CHECK_FULL);
+ slow_object = get_vm_result();
+ got_slow = builder()->GetInsertBlock();
+
+ // Push the object
+ if (push_object) {
+ builder()->CreateBr(push_object);
+ builder()->SetInsertPoint(push_object);
+ }
+ if (fast_object) {
+ PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), "object");
+ phi->addIncoming(fast_object, got_fast);
+ phi->addIncoming(slow_object, got_slow);
+ object = phi;
+ current_state()->merge(fast_state, got_fast, got_slow);
+ }
+ else {
+ object = slow_object;
+ }
+
+ push(SharkValue::create_jobject(object, true));
+}
+
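Both fast paths in do_new are bump-pointer allocations. The TLAB path can advance the thread-local top with a plain store, since nothing else allocates from that buffer; the shared-heap path must advance the global top with a compare-and-swap and retry on contention. Either way the new object is zeroed and given its mark word and klass pointer. A minimal standalone sketch of the shared-heap path, using std::atomic in place of the builder's CreateCmpxchgPtr (an illustration of the technique, not HotSpot code):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Simplified bump-pointer arena standing in for the heap top/end words.
    static char arena[4096];
    static std::atomic<std::uintptr_t> heap_top((std::uintptr_t) arena);
    static const std::uintptr_t heap_end = (std::uintptr_t) arena + sizeof(arena);

    // Shared-heap fast path: CAS the top pointer forward, retrying on
    // contention, exactly like the 'retry' loop in the generated code.
    static void* heap_alloc(std::size_t size_in_bytes) {
      std::uintptr_t old_top = heap_top.load();
      for (;;) {
        std::uintptr_t new_top = old_top + size_in_bytes;
        if (new_top > heap_end)
          return NULL;                 // take the slow path (call into the VM)
        if (heap_top.compare_exchange_weak(old_top, new_top))
          return std::memset((void*) old_top, 0, size_in_bytes);  // zero it
        // old_top was refreshed by compare_exchange_weak; retry.
      }
    }

    int main() {
      void* obj = heap_alloc(64);
      std::printf("allocated at %p\n", obj);
      return 0;
    }
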
+void SharkTopLevelBlock::do_newarray() {
+ BasicType type = (BasicType) iter()->get_index();
+
+ call_vm(
+ builder()->newarray(),
+ LLVMValue::jint_constant(type),
+ pop()->jint_value(),
+ EX_CHECK_FULL);
+
+ ciArrayKlass *array_klass = ciArrayKlass::make(ciType::make(type));
+ push(SharkValue::create_generic(array_klass, get_vm_result(), true));
+}
+
+void SharkTopLevelBlock::do_anewarray() {
+ bool will_link;
+ ciKlass *klass = iter()->get_klass(will_link);
+ assert(will_link, "typeflow responsibility");
+
+ ciObjArrayKlass *array_klass = ciObjArrayKlass::make(klass);
+ if (!array_klass->is_loaded()) {
+ Unimplemented();
+ }
+
+ call_vm(
+ builder()->anewarray(),
+ LLVMValue::jint_constant(iter()->get_klass_index()),
+ pop()->jint_value(),
+ EX_CHECK_FULL);
+
+ push(SharkValue::create_generic(array_klass, get_vm_result(), true));
+}
+
+void SharkTopLevelBlock::do_multianewarray() {
+ bool will_link;
+ ciArrayKlass *array_klass = iter()->get_klass(will_link)->as_array_klass();
+ assert(will_link, "typeflow responsibility");
+
+ // The dimensions are stack values, so we use their slots for the
+ // dimensions array. Note that we are storing them in the reverse
+ // of normal stack order.
+ int ndims = iter()->get_dimensions();
+
+ Value *dimensions = stack()->slot_addr(
+ stack()->stack_slots_offset() + max_stack() - xstack_depth(),
+ ArrayType::get(SharkType::jint_type(), ndims),
+ "dimensions");
+
+ for (int i = 0; i < ndims; i++) {
+ builder()->CreateStore(
+ xstack(ndims - 1 - i)->jint_value(),
+ builder()->CreateStructGEP(dimensions, i));
+ }
+
+ call_vm(
+ builder()->multianewarray(),
+ LLVMValue::jint_constant(iter()->get_klass_index()),
+ LLVMValue::jint_constant(ndims),
+ builder()->CreateStructGEP(dimensions, 0),
+ EX_CHECK_FULL);
+
+ // Now we can pop the dimensions off the stack
+ for (int i = 0; i < ndims; i++)
+ pop();
+
+ push(SharkValue::create_generic(array_klass, get_vm_result(), true));
+}
+
+void SharkTopLevelBlock::acquire_method_lock() {
+ Value *lockee;
+ if (target()->is_static())
+ lockee = builder()->CreateInlineOop(target()->holder()->java_mirror());
+ else
+ lockee = local(0)->jobject_value();
+
+ iter()->force_bci(start()); // for the decache in acquire_lock
+ acquire_lock(lockee, EX_CHECK_NO_CATCH);
+}
+
+void SharkTopLevelBlock::do_monitorenter() {
+ SharkValue *lockee = pop();
+ check_null(lockee);
+ acquire_lock(lockee->jobject_value(), EX_CHECK_FULL);
+}
+
+void SharkTopLevelBlock::do_monitorexit() {
+ pop(); // don't need this (monitors are block structured)
+ release_lock(EX_CHECK_NO_CATCH);
+}
+
+void SharkTopLevelBlock::acquire_lock(Value *lockee, int exception_action) {
+ BasicBlock *try_recursive = function()->CreateBlock("try_recursive");
+ BasicBlock *got_recursive = function()->CreateBlock("got_recursive");
+ BasicBlock *not_recursive = function()->CreateBlock("not_recursive");
+ BasicBlock *acquired_fast = function()->CreateBlock("acquired_fast");
+ BasicBlock *lock_acquired = function()->CreateBlock("lock_acquired");
+
+ int monitor = num_monitors();
+ Value *monitor_addr = stack()->monitor_addr(monitor);
+ Value *monitor_object_addr = stack()->monitor_object_addr(monitor);
+ Value *monitor_header_addr = stack()->monitor_header_addr(monitor);
+
+ // Store the object and mark the slot as live
+ builder()->CreateStore(lockee, monitor_object_addr);
+ set_num_monitors(monitor + 1);
+
+ // Try a simple lock
+ Value *mark_addr = builder()->CreateAddressOfStructEntry(
+ lockee, in_ByteSize(oopDesc::mark_offset_in_bytes()),
+ PointerType::getUnqual(SharkType::intptr_type()),
+ "mark_addr");
+
+ Value *mark = builder()->CreateLoad(mark_addr, "mark");
+ Value *disp = builder()->CreateOr(
+ mark, LLVMValue::intptr_constant(markOopDesc::unlocked_value), "disp");
+ builder()->CreateStore(disp, monitor_header_addr);
+
+ Value *lock = builder()->CreatePtrToInt(
+ monitor_header_addr, SharkType::intptr_type());
+ Value *check = builder()->CreateCmpxchgPtr(lock, mark_addr, disp);
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(disp, check),
+ acquired_fast, try_recursive);
+
+ // Locking failed, but maybe this thread already owns it
+ builder()->SetInsertPoint(try_recursive);
+ Value *addr = builder()->CreateAnd(
+ disp,
+ LLVMValue::intptr_constant(~markOopDesc::lock_mask_in_place));
+
+ // NB we use the entire stack, but JavaThread::is_lock_owned()
+ // uses a more limited range. I don't think it hurts though...
+ Value *stack_limit = builder()->CreateValueOfStructEntry(
+ thread(), Thread::stack_base_offset(),
+ SharkType::intptr_type(),
+ "stack_limit");
+
+ assert(sizeof(size_t) == sizeof(intptr_t), "should be");
+ Value *stack_size = builder()->CreateValueOfStructEntry(
+ thread(), Thread::stack_size_offset(),
+ SharkType::intptr_type(),
+ "stack_size");
+
+ Value *stack_start =
+ builder()->CreateSub(stack_limit, stack_size, "stack_start");
+
+ builder()->CreateCondBr(
+ builder()->CreateAnd(
+ builder()->CreateICmpUGE(addr, stack_start),
+ builder()->CreateICmpULT(addr, stack_limit)),
+ got_recursive, not_recursive);
+
+ builder()->SetInsertPoint(got_recursive);
+ builder()->CreateStore(LLVMValue::intptr_constant(0), monitor_header_addr);
+ builder()->CreateBr(acquired_fast);
+
+ // Create an edge for the state merge
+ builder()->SetInsertPoint(acquired_fast);
+ SharkState *fast_state = current_state()->copy();
+ builder()->CreateBr(lock_acquired);
+
+ // It's not a recursive case so we need to drop into the runtime
+ builder()->SetInsertPoint(not_recursive);
+ call_vm(
+ builder()->monitorenter(), monitor_addr,
+ exception_action | EAM_MONITOR_FUDGE);
+ BasicBlock *acquired_slow = builder()->GetInsertBlock();
+ builder()->CreateBr(lock_acquired);
+
+ // All done
+ builder()->SetInsertPoint(lock_acquired);
+ current_state()->merge(fast_state, acquired_fast, acquired_slow);
+}
+
+void SharkTopLevelBlock::release_lock(int exception_action) {
+ BasicBlock *not_recursive = function()->CreateBlock("not_recursive");
+ BasicBlock *released_fast = function()->CreateBlock("released_fast");
+ BasicBlock *slow_path = function()->CreateBlock("slow_path");
+ BasicBlock *lock_released = function()->CreateBlock("lock_released");
+
+ int monitor = num_monitors() - 1;
+ Value *monitor_addr = stack()->monitor_addr(monitor);
+ Value *monitor_object_addr = stack()->monitor_object_addr(monitor);
+ Value *monitor_header_addr = stack()->monitor_header_addr(monitor);
+
+ // If it is recursive then we're already done
+ Value *disp = builder()->CreateLoad(monitor_header_addr);
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(disp, LLVMValue::intptr_constant(0)),
+ released_fast, not_recursive);
+
+ // Try a simple unlock
+ builder()->SetInsertPoint(not_recursive);
+
+ Value *lock = builder()->CreatePtrToInt(
+ monitor_header_addr, SharkType::intptr_type());
+
+ Value *lockee = builder()->CreateLoad(monitor_object_addr);
+
+ Value *mark_addr = builder()->CreateAddressOfStructEntry(
+ lockee, in_ByteSize(oopDesc::mark_offset_in_bytes()),
+ PointerType::getUnqual(SharkType::intptr_type()),
+ "mark_addr");
+
+ Value *check = builder()->CreateCmpxchgPtr(disp, mark_addr, lock);
+ builder()->CreateCondBr(
+ builder()->CreateICmpEQ(lock, check),
+ released_fast, slow_path);
+
+ // Create an edge for the state merge
+ builder()->SetInsertPoint(released_fast);
+ SharkState *fast_state = current_state()->copy();
+ builder()->CreateBr(lock_released);
+
+ // Need to drop into the runtime to release this one
+ builder()->SetInsertPoint(slow_path);
+ call_vm(builder()->monitorexit(), monitor_addr, exception_action);
+ BasicBlock *released_slow = builder()->GetInsertBlock();
+ builder()->CreateBr(lock_released);
+
+ // All done
+ builder()->SetInsertPoint(lock_released);
+ current_state()->merge(fast_state, released_fast, released_slow);
+
+ // The object slot is now dead
+ set_num_monitors(monitor);
+}
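acquire_lock and release_lock above are the standard stack-locking fast path: the unlocked mark word is saved into an on-stack lock record (the displaced header) and the object's mark word is CAS'ed to point at that record; if the CAS fails but the old mark already points into the current thread's stack, the acquisition is recursive and is recorded by zeroing the displaced header; any other failure drops into the runtime. Unlocking reverses the CAS, with a zero displaced header meaning there is nothing to undo. A simplified standalone sketch of that protocol (hypothetical header layout, std::atomic in place of CreateCmpxchgPtr):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Simplified object header: the low bit set means "unlocked"; otherwise
    // the mark word holds a pointer to an on-stack lock record.  This is a
    // hypothetical layout, not HotSpot's markOop encoding.
    struct Obj { std::atomic<std::uintptr_t> mark; };

    struct LockRecord { std::uintptr_t displaced_header; };

    static const std::uintptr_t UNLOCKED = 1;

    // Fast-path lock: stash the unlocked header in the lock record, then CAS
    // the mark word to point at the record.  A zero displaced header would
    // mark a recursive acquisition; any other CAS failure goes to the runtime.
    static bool try_lock(Obj* o, LockRecord* rec) {
      std::uintptr_t disp = o->mark.load() | UNLOCKED;
      rec->displaced_header = disp;
      std::uintptr_t expected = disp;
      if (o->mark.compare_exchange_strong(expected, (std::uintptr_t) rec))
        return true;                               // acquired_fast
      // (The real code also checks whether 'expected' points into this
      // thread's stack, which would mean we already own the lock.)
      return false;                                // not_recursive -> call_vm
    }

    static bool try_unlock(Obj* o, LockRecord* rec) {
      if (rec->displaced_header == 0)
        return true;                               // recursive: nothing to undo
      std::uintptr_t expected = (std::uintptr_t) rec;
      return o->mark.compare_exchange_strong(expected, rec->displaced_header);
    }

    int main() {
      Obj o; o.mark = UNLOCKED;
      LockRecord rec;
      bool locked   = try_lock(&o, &rec);
      bool unlocked = try_unlock(&o, &rec);
      std::printf("lock: %d, unlock: %d\n", (int) locked, (int) unlocked);
      return 0;
    }
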
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkTopLevelBlock.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkTopLevelBlock : public SharkBlock {
+ public:
+ SharkTopLevelBlock(SharkFunction* function, ciTypeFlow::Block* ciblock)
+ : SharkBlock(function),
+ _function(function),
+ _ciblock(ciblock),
+ _entered(false),
+ _has_trap(false),
+ _needs_phis(false),
+ _entry_state(NULL),
+ _entry_block(NULL) {}
+
+ private:
+ SharkFunction* _function;
+ ciTypeFlow::Block* _ciblock;
+
+ public:
+ SharkFunction* function() const {
+ return _function;
+ }
+ ciTypeFlow::Block* ciblock() const {
+ return _ciblock;
+ }
+
+ // Function properties
+ public:
+ SharkStack* stack() const {
+ return function()->stack();
+ }
+
+ // Typeflow properties
+ public:
+ int index() const {
+ return ciblock()->pre_order();
+ }
+ bool is_backedge_copy() const {
+ return ciblock()->is_backedge_copy();
+ }
+ int stack_depth_at_entry() const {
+ return ciblock()->stack_size();
+ }
+ ciType* local_type_at_entry(int index) const {
+ return ciblock()->local_type_at(index);
+ }
+ ciType* stack_type_at_entry(int slot) const {
+ return ciblock()->stack_type_at(slot);
+ }
+ int start() const {
+ return ciblock()->start();
+ }
+ int limit() const {
+ return ciblock()->limit();
+ }
+ bool falls_through() const {
+ return ciblock()->control() == ciBlock::fall_through_bci;
+ }
+ int num_successors() const {
+ return ciblock()->successors()->length();
+ }
+ SharkTopLevelBlock* successor(int index) const {
+ return function()->block(ciblock()->successors()->at(index)->pre_order());
+ }
+ SharkTopLevelBlock* bci_successor(int bci) const;
+
+ // Exceptions
+ private:
+ GrowableArray<ciExceptionHandler*>* _exc_handlers;
+ GrowableArray<SharkTopLevelBlock*>* _exceptions;
+
+ private:
+ void compute_exceptions();
+
+ private:
+ int num_exceptions() const {
+ return _exc_handlers->length();
+ }
+ ciExceptionHandler* exc_handler(int index) const {
+ return _exc_handlers->at(index);
+ }
+ SharkTopLevelBlock* exception(int index) const {
+ return _exceptions->at(index);
+ }
+
+ // Traps
+ private:
+ bool _has_trap;
+ int _trap_request;
+ int _trap_bci;
+
+ void set_trap(int trap_request, int trap_bci) {
+ assert(!has_trap(), "shouldn't have");
+ _has_trap = true;
+ _trap_request = trap_request;
+ _trap_bci = trap_bci;
+ }
+
+ private:
+ bool has_trap() {
+ return _has_trap;
+ }
+ int trap_request() {
+ assert(has_trap(), "should have");
+ return _trap_request;
+ }
+ int trap_bci() {
+ assert(has_trap(), "should have");
+ return _trap_bci;
+ }
+
+ private:
+ void scan_for_traps();
+
+ private:
+ bool static_field_ok_in_clinit(ciField* field);
+
+ // Entry state
+ private:
+ bool _entered;
+ bool _needs_phis;
+
+ public:
+ bool entered() const {
+ return _entered;
+ }
+ bool needs_phis() const {
+ return _needs_phis;
+ }
+
+ private:
+ void enter(SharkTopLevelBlock* predecessor, bool is_exception);
+
+ public:
+ void enter() {
+ enter(NULL, false);
+ }
+
+ private:
+ SharkState* _entry_state;
+
+ private:
+ SharkState* entry_state();
+
+ private:
+ llvm::BasicBlock* _entry_block;
+
+ public:
+ llvm::BasicBlock* entry_block() const {
+ return _entry_block;
+ }
+
+ public:
+ void initialize();
+
+ public:
+ void add_incoming(SharkState* incoming_state);
+
+ // Method
+ public:
+ llvm::Value* method() {
+ return current_state()->method();
+ }
+
+ // Temporary oop storage
+ public:
+ void set_oop_tmp(llvm::Value* value) {
+ assert(value, "value must be non-NULL (will be reset by get_oop_tmp)");
+ assert(!current_state()->oop_tmp(), "oop_tmp gets and sets must match");
+ current_state()->set_oop_tmp(value);
+ }
+ llvm::Value* get_oop_tmp() {
+ llvm::Value* value = current_state()->oop_tmp();
+ assert(value, "oop_tmp gets and sets must match");
+ current_state()->set_oop_tmp(NULL);
+ return value;
+ }
+
+ // Cache and decache
+ private:
+ void decache_for_Java_call(ciMethod* callee);
+ void cache_after_Java_call(ciMethod* callee);
+ void decache_for_VM_call();
+ void cache_after_VM_call();
+ void decache_for_trap();
+
+ // Monitors
+ private:
+ int num_monitors() {
+ return current_state()->num_monitors();
+ }
+  void set_num_monitors(int num_monitors) {
+ current_state()->set_num_monitors(num_monitors);
+ }
+
+ // Code generation
+ public:
+ void emit_IR();
+
+ // Branch helpers
+ private:
+ void do_branch(int successor_index);
+
+ // Zero checks
+ private:
+ void do_zero_check(SharkValue* value);
+ void zero_check_value(SharkValue* value, llvm::BasicBlock* continue_block);
+
+ public:
+ void do_deferred_zero_check(SharkValue* value,
+ int bci,
+ SharkState* saved_state,
+ llvm::BasicBlock* continue_block);
+
+  // Exceptions
+ private:
+ llvm::Value* pending_exception_address() const {
+ return builder()->CreateAddressOfStructEntry(
+ thread(), Thread::pending_exception_offset(),
+ llvm::PointerType::getUnqual(SharkType::oop_type()),
+ "pending_exception_addr");
+ }
+ llvm::LoadInst* get_pending_exception() const {
+ return builder()->CreateLoad(
+ pending_exception_address(), "pending_exception");
+ }
+ void clear_pending_exception() const {
+ builder()->CreateStore(LLVMValue::null(), pending_exception_address());
+ }
+ public:
+ enum ExceptionActionMask {
+ // The actual bitmasks that things test against
+ EAM_CHECK = 1, // whether to check for pending exceptions
+ EAM_HANDLE = 2, // whether to attempt to handle pending exceptions
+ EAM_MONITOR_FUDGE = 4, // whether the monitor count needs adjusting
+
+ // More convenient values for passing
+ EX_CHECK_NONE = 0,
+ EX_CHECK_NO_CATCH = EAM_CHECK,
+ EX_CHECK_FULL = EAM_CHECK | EAM_HANDLE
+ };
+ void check_pending_exception(int action);
+ void handle_exception(llvm::Value* exception, int action);
+ void marshal_exception_fast(int num_options);
+ void marshal_exception_slow(int num_options);
+ llvm::BasicBlock* handler_for_exception(int index);
+
+ // VM calls
+ private:
+ llvm::CallInst* call_vm(llvm::Value* callee,
+ llvm::Value** args_start,
+ llvm::Value** args_end,
+ int exception_action) {
+ decache_for_VM_call();
+ stack()->CreateSetLastJavaFrame();
+ llvm::CallInst *res = builder()->CreateCall(callee, args_start, args_end);
+ stack()->CreateResetLastJavaFrame();
+ cache_after_VM_call();
+ if (exception_action & EAM_CHECK) {
+ check_pending_exception(exception_action);
+ current_state()->set_has_safepointed(true);
+ }
+ return res;
+ }
+
+ public:
+ llvm::CallInst* call_vm(llvm::Value* callee,
+ int exception_action) {
+ llvm::Value *args[] = {thread()};
+ return call_vm(callee, args, args + 1, exception_action);
+ }
+ llvm::CallInst* call_vm(llvm::Value* callee,
+ llvm::Value* arg1,
+ int exception_action) {
+ llvm::Value *args[] = {thread(), arg1};
+ return call_vm(callee, args, args + 2, exception_action);
+ }
+ llvm::CallInst* call_vm(llvm::Value* callee,
+ llvm::Value* arg1,
+ llvm::Value* arg2,
+ int exception_action) {
+ llvm::Value *args[] = {thread(), arg1, arg2};
+ return call_vm(callee, args, args + 3, exception_action);
+ }
+ llvm::CallInst* call_vm(llvm::Value* callee,
+ llvm::Value* arg1,
+ llvm::Value* arg2,
+ llvm::Value* arg3,
+ int exception_action) {
+ llvm::Value *args[] = {thread(), arg1, arg2, arg3};
+ return call_vm(callee, args, args + 4, exception_action);
+ }
+
+ // VM call oop return handling
+ private:
+ llvm::LoadInst* get_vm_result() const {
+ llvm::Value *addr = builder()->CreateAddressOfStructEntry(
+ thread(), JavaThread::vm_result_offset(),
+ llvm::PointerType::getUnqual(SharkType::oop_type()),
+ "vm_result_addr");
+ llvm::LoadInst *result = builder()->CreateLoad(addr, "vm_result");
+ builder()->CreateStore(LLVMValue::null(), addr);
+ return result;
+ }
+
+ // Synchronization
+ private:
+ void acquire_lock(llvm::Value* lockee, int exception_action);
+ void release_lock(int exception_action);
+
+ public:
+ void acquire_method_lock();
+
+ // Bounds checks
+ private:
+ void check_bounds(SharkValue* array, SharkValue* index);
+
+ // Safepoints
+ private:
+ void maybe_add_safepoint();
+ void maybe_add_backedge_safepoint();
+
+ // Loop safepoint removal
+ private:
+ bool _can_reach_visited;
+
+ bool can_reach(SharkTopLevelBlock* other);
+ bool can_reach_helper(SharkTopLevelBlock* other);
+
+ // Traps
+ private:
+ llvm::BasicBlock* make_trap(int trap_bci, int trap_request);
+ void do_trap(int trap_request);
+
+ // Returns
+ private:
+ void call_register_finalizer(llvm::Value* receiver);
+ void handle_return(BasicType type, llvm::Value* exception);
+
+ // arraylength
+ private:
+ void do_arraylength();
+
+ // *aload and *astore
+ private:
+ void do_aload(BasicType basic_type);
+ void do_astore(BasicType basic_type);
+
+ // *return and athrow
+ private:
+ void do_return(BasicType type);
+ void do_athrow();
+
+ // goto*
+ private:
+ void do_goto();
+
+ // jsr* and ret
+ private:
+ void do_jsr();
+ void do_ret();
+
+ // if*
+ private:
+ void do_if_helper(llvm::ICmpInst::Predicate p,
+ llvm::Value* b,
+ llvm::Value* a,
+ SharkState* if_taken_state,
+ SharkState* not_taken_state);
+ void do_if(llvm::ICmpInst::Predicate p, SharkValue* b, SharkValue* a);
+
+ // tableswitch and lookupswitch
+ private:
+ void do_switch();
+
+ // invoke*
+ private:
+ ciMethod* improve_virtual_call(ciMethod* caller,
+ ciInstanceKlass* klass,
+ ciMethod* dest_method,
+ ciType* receiver_type);
+ llvm::Value* get_direct_callee(ciMethod* method);
+ llvm::Value* get_virtual_callee(SharkValue* receiver, int vtable_index);
+ llvm::Value* get_interface_callee(SharkValue* receiver, ciMethod* method);
+
+ void do_call();
+
+ // checkcast and instanceof
+ private:
+ bool static_subtype_check(ciKlass* check_klass, ciKlass* object_klass);
+ void do_full_instance_check(ciKlass* klass);
+ void do_trapping_instance_check(ciKlass* klass);
+
+ void do_instance_check();
+ bool maybe_do_instanceof_if();
+
+ // new and *newarray
+ private:
+ void do_new();
+ void do_newarray();
+ void do_anewarray();
+ void do_multianewarray();
+
+ // monitorenter and monitorexit
+ private:
+ void do_monitorenter();
+ void do_monitorexit();
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkType.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+class SharkType : public AllStatic {
+ private:
+ static SharkContext& context() {
+ return SharkContext::current();
+ }
+
+ // Basic types
+ public:
+ static const llvm::Type* void_type() {
+ return context().void_type();
+ }
+ static const llvm::IntegerType* bit_type() {
+ return context().bit_type();
+ }
+ static const llvm::IntegerType* jbyte_type() {
+ return context().jbyte_type();
+ }
+ static const llvm::IntegerType* jshort_type() {
+ return context().jshort_type();
+ }
+ static const llvm::IntegerType* jint_type() {
+ return context().jint_type();
+ }
+ static const llvm::IntegerType* jlong_type() {
+ return context().jlong_type();
+ }
+ static const llvm::Type* jfloat_type() {
+ return context().jfloat_type();
+ }
+ static const llvm::Type* jdouble_type() {
+ return context().jdouble_type();
+ }
+ static const llvm::IntegerType* intptr_type() {
+ return context().intptr_type();
+ }
+
+ // Compound types
+ public:
+ static const llvm::PointerType* itableOffsetEntry_type() {
+ return context().itableOffsetEntry_type();
+ }
+ static const llvm::PointerType* jniEnv_type() {
+ return context().jniEnv_type();
+ }
+ static const llvm::PointerType* jniHandleBlock_type() {
+ return context().jniHandleBlock_type();
+ }
+ static const llvm::PointerType* klass_type() {
+ return context().klass_type();
+ }
+ static const llvm::PointerType* methodOop_type() {
+ return context().methodOop_type();
+ }
+ static const llvm::ArrayType* monitor_type() {
+ return context().monitor_type();
+ }
+ static const llvm::PointerType* oop_type() {
+ return context().oop_type();
+ }
+ static const llvm::PointerType* thread_type() {
+ return context().thread_type();
+ }
+ static const llvm::PointerType* zeroStack_type() {
+ return context().zeroStack_type();
+ }
+ static const llvm::FunctionType* entry_point_type() {
+ return context().entry_point_type();
+ }
+ static const llvm::FunctionType* osr_entry_point_type() {
+ return context().osr_entry_point_type();
+ }
+
+ // Mappings
+ public:
+ static const llvm::Type* to_stackType(BasicType type) {
+ return context().to_stackType(type);
+ }
+ static const llvm::Type* to_stackType(ciType* type) {
+ return to_stackType(type->basic_type());
+ }
+ static const llvm::Type* to_arrayType(BasicType type) {
+ return context().to_arrayType(type);
+ }
+ static const llvm::Type* to_arrayType(ciType* type) {
+ return to_arrayType(type->basic_type());
+ }
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkValue.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_sharkValue.cpp.incl"
+
+using namespace llvm;
+
+// Cloning
+
+SharkValue* SharkNormalValue::clone() const {
+ return SharkValue::create_generic(type(), generic_value(), zero_checked());
+}
+SharkValue* SharkPHIValue::clone() const {
+ return SharkValue::create_phi(type(), (PHINode *) generic_value(), this);
+}
+SharkValue* SharkAddressValue::clone() const {
+ return SharkValue::address_constant(address_value());
+}
+
+// Casting
+
+bool SharkValue::is_phi() const {
+ return false;
+}
+bool SharkPHIValue::is_phi() const {
+ return true;
+}
+SharkPHIValue* SharkValue::as_phi() {
+ ShouldNotCallThis();
+}
+SharkPHIValue* SharkPHIValue::as_phi() {
+ return this;
+}
+
+// Comparison
+
+bool SharkNormalValue::equal_to(SharkValue *other) const {
+ return (this->type() == other->type() &&
+ this->generic_value() == other->generic_value() &&
+ this->zero_checked() == other->zero_checked());
+}
+bool SharkAddressValue::equal_to(SharkValue *other) const {
+ return (this->address_value() == other->address_value());
+}
+
+// Type access
+
+ciType* SharkValue::type() const {
+ ShouldNotCallThis();
+}
+ciType* SharkNormalValue::type() const {
+ return _type;
+}
+
+BasicType SharkNormalValue::basic_type() const {
+ return type()->basic_type();
+}
+BasicType SharkAddressValue::basic_type() const {
+ return T_ADDRESS;
+}
+
+int SharkNormalValue::size() const {
+ return type()->size();
+}
+int SharkAddressValue::size() const {
+ return 1;
+}
+
+bool SharkValue::is_jint() const {
+ return false;
+}
+bool SharkValue::is_jlong() const {
+ return false;
+}
+bool SharkValue::is_jfloat() const {
+ return false;
+}
+bool SharkValue::is_jdouble() const {
+ return false;
+}
+bool SharkValue::is_jobject() const {
+ return false;
+}
+bool SharkValue::is_jarray() const {
+ return false;
+}
+bool SharkValue::is_address() const {
+ return false;
+}
+
+bool SharkNormalValue::is_jint() const {
+ return llvm_value()->getType() == SharkType::jint_type();
+}
+bool SharkNormalValue::is_jlong() const {
+ return llvm_value()->getType() == SharkType::jlong_type();
+}
+bool SharkNormalValue::is_jfloat() const {
+ return llvm_value()->getType() == SharkType::jfloat_type();
+}
+bool SharkNormalValue::is_jdouble() const {
+ return llvm_value()->getType() == SharkType::jdouble_type();
+}
+bool SharkNormalValue::is_jobject() const {
+ return llvm_value()->getType() == SharkType::oop_type();
+}
+bool SharkNormalValue::is_jarray() const {
+ return basic_type() == T_ARRAY;
+}
+bool SharkAddressValue::is_address() const {
+ return true;
+}
+
+// Typed conversions from SharkValues
+
+Value* SharkValue::jint_value() const {
+ ShouldNotCallThis();
+}
+Value* SharkValue::jlong_value() const {
+ ShouldNotCallThis();
+}
+Value* SharkValue::jfloat_value() const {
+ ShouldNotCallThis();
+}
+Value* SharkValue::jdouble_value() const {
+ ShouldNotCallThis();
+}
+Value* SharkValue::jobject_value() const {
+ ShouldNotCallThis();
+}
+Value* SharkValue::jarray_value() const {
+ ShouldNotCallThis();
+}
+int SharkValue::address_value() const {
+ ShouldNotCallThis();
+}
+
+Value* SharkNormalValue::jint_value() const {
+ assert(is_jint(), "should be");
+ return llvm_value();
+}
+Value* SharkNormalValue::jlong_value() const {
+ assert(is_jlong(), "should be");
+ return llvm_value();
+}
+Value* SharkNormalValue::jfloat_value() const {
+ assert(is_jfloat(), "should be");
+ return llvm_value();
+}
+Value* SharkNormalValue::jdouble_value() const {
+ assert(is_jdouble(), "should be");
+ return llvm_value();
+}
+Value* SharkNormalValue::jobject_value() const {
+ assert(is_jobject(), "should be");
+ return llvm_value();
+}
+Value* SharkNormalValue::jarray_value() const {
+ // XXX assert(is_jarray(), "should be");
+ // XXX http://icedtea.classpath.org/bugzilla/show_bug.cgi?id=324
+ assert(is_jobject(), "should be");
+ return llvm_value();
+}
+int SharkAddressValue::address_value() const {
+ return _bci;
+}
+
+// Type-losing conversions -- use with care!
+
+Value* SharkNormalValue::generic_value() const {
+ return llvm_value();
+}
+Value* SharkAddressValue::generic_value() const {
+ return LLVMValue::intptr_constant(address_value());
+}
+
+Value* SharkValue::intptr_value(SharkBuilder* builder) const {
+ ShouldNotCallThis();
+}
+Value* SharkNormalValue::intptr_value(SharkBuilder* builder) const {
+ return builder->CreatePtrToInt(jobject_value(), SharkType::intptr_type());
+}
+
+// Phi-style stuff for SharkPHIState::add_incoming
+
+void SharkValue::addIncoming(SharkValue *value, BasicBlock* block) {
+ ShouldNotCallThis();
+}
+void SharkPHIValue::addIncoming(SharkValue *value, BasicBlock* block) {
+ assert(!is_clone(), "shouldn't be");
+ ((llvm::PHINode *) generic_value())->addIncoming(
+ value->generic_value(), block);
+ if (!value->zero_checked())
+ _all_incomers_zero_checked = false;
+}
+void SharkAddressValue::addIncoming(SharkValue *value, BasicBlock* block) {
+ assert(this->equal_to(value), "should be");
+}
+
+// Phi-style stuff for SharkState::merge
+
+SharkValue* SharkNormalValue::merge(SharkBuilder* builder,
+ SharkValue* other,
+ BasicBlock* other_block,
+ BasicBlock* this_block,
+ const char* name) {
+ assert(type() == other->type(), "should be");
+ assert(zero_checked() == other->zero_checked(), "should be");
+
+ PHINode *phi = builder->CreatePHI(SharkType::to_stackType(type()), name);
+ phi->addIncoming(this->generic_value(), this_block);
+ phi->addIncoming(other->generic_value(), other_block);
+ return SharkValue::create_generic(type(), phi, zero_checked());
+}
+SharkValue* SharkAddressValue::merge(SharkBuilder* builder,
+ SharkValue* other,
+ BasicBlock* other_block,
+ BasicBlock* this_block,
+ const char* name) {
+ assert(this->equal_to(other), "should be");
+ return this;
+}
+
+// Repeated null and divide-by-zero check removal
+
+bool SharkValue::zero_checked() const {
+ ShouldNotCallThis();
+}
+void SharkValue::set_zero_checked(bool zero_checked) {
+ ShouldNotCallThis();
+}
+
+bool SharkNormalValue::zero_checked() const {
+ return _zero_checked;
+}
+void SharkNormalValue::set_zero_checked(bool zero_checked) {
+ _zero_checked = zero_checked;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/sharkValue.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Items on the stack and in local variables are tracked using
+// SharkValue objects.
+//
+// All SharkValues are one of two core types, SharkNormalValue
+// and SharkAddressValue, but no code outside this file should
+// ever refer to those directly. The split is because of the
+// way JSRs are handled: the typeflow pass expands them into
+// multiple copies, so the return addresses pushed by jsr and
+// popped by ret only exist at compile time. Having separate
+// classes for these allows us to check that our jsr handling
+// is correct, via assertions.
+//
+// There is one more type, SharkPHIValue, which is a subclass
+// of SharkNormalValue with a couple of extra methods. Use of
+// SharkPHIValue outside of this file is acceptable, so long
+// as it is obtained via SharkValue::as_phi().
+
+class SharkBuilder;
+class SharkPHIValue;
+
+class SharkValue : public ResourceObj {
+ protected:
+ SharkValue() {}
+
+ // Cloning
+ public:
+ virtual SharkValue* clone() const = 0;
+
+ // Casting
+ public:
+ virtual bool is_phi() const;
+ virtual SharkPHIValue* as_phi();
+
+ // Comparison
+ public:
+ virtual bool equal_to(SharkValue* other) const = 0;
+
+ // Type access
+ public:
+ virtual BasicType basic_type() const = 0;
+ virtual ciType* type() const;
+
+ virtual bool is_jint() const;
+ virtual bool is_jlong() const;
+ virtual bool is_jfloat() const;
+ virtual bool is_jdouble() const;
+ virtual bool is_jobject() const;
+ virtual bool is_jarray() const;
+ virtual bool is_address() const;
+
+ virtual int size() const = 0;
+
+ bool is_one_word() const {
+ return size() == 1;
+ }
+ bool is_two_word() const {
+ return size() == 2;
+ }
+
+ // Typed conversion from SharkValues
+ public:
+ virtual llvm::Value* jint_value() const;
+ virtual llvm::Value* jlong_value() const;
+ virtual llvm::Value* jfloat_value() const;
+ virtual llvm::Value* jdouble_value() const;
+ virtual llvm::Value* jobject_value() const;
+ virtual llvm::Value* jarray_value() const;
+ virtual int address_value() const;
+
+ // Typed conversion to SharkValues
+ public:
+ static SharkValue* create_jint(llvm::Value* value, bool zero_checked) {
+ assert(value->getType() == SharkType::jint_type(), "should be");
+ return create_generic(ciType::make(T_INT), value, zero_checked);
+ }
+ static SharkValue* create_jlong(llvm::Value* value, bool zero_checked) {
+ assert(value->getType() == SharkType::jlong_type(), "should be");
+ return create_generic(ciType::make(T_LONG), value, zero_checked);
+ }
+ static SharkValue* create_jfloat(llvm::Value* value) {
+ assert(value->getType() == SharkType::jfloat_type(), "should be");
+ return create_generic(ciType::make(T_FLOAT), value, false);
+ }
+ static SharkValue* create_jdouble(llvm::Value* value) {
+ assert(value->getType() == SharkType::jdouble_type(), "should be");
+ return create_generic(ciType::make(T_DOUBLE), value, false);
+ }
+ static SharkValue* create_jobject(llvm::Value* value, bool zero_checked) {
+ assert(value->getType() == SharkType::oop_type(), "should be");
+ return create_generic(ciType::make(T_OBJECT), value, zero_checked);
+ }
+
+ // Typed conversion from constants of various types
+ public:
+ static SharkValue* jint_constant(jint value) {
+ return create_jint(LLVMValue::jint_constant(value), value != 0);
+ }
+ static SharkValue* jlong_constant(jlong value) {
+ return create_jlong(LLVMValue::jlong_constant(value), value != 0);
+ }
+ static SharkValue* jfloat_constant(jfloat value) {
+ return create_jfloat(LLVMValue::jfloat_constant(value));
+ }
+ static SharkValue* jdouble_constant(jdouble value) {
+ return create_jdouble(LLVMValue::jdouble_constant(value));
+ }
+ static SharkValue* null() {
+ return create_jobject(LLVMValue::null(), false);
+ }
+ static inline SharkValue* address_constant(int bci);
+
+ // Type-losing conversions -- use with care!
+ public:
+ virtual llvm::Value* generic_value() const = 0;
+ virtual llvm::Value* intptr_value(SharkBuilder* builder) const;
+
+ static inline SharkValue* create_generic(ciType* type,
+ llvm::Value* value,
+ bool zero_checked);
+ static inline SharkValue* create_phi(ciType* type,
+ llvm::PHINode* phi,
+ const SharkPHIValue* parent = NULL);
+
+ // Phi-style stuff
+ public:
+ virtual void addIncoming(SharkValue* value, llvm::BasicBlock* block);
+ virtual SharkValue* merge(SharkBuilder* builder,
+ SharkValue* other,
+ llvm::BasicBlock* other_block,
+ llvm::BasicBlock* this_block,
+ const char* name) = 0;
+
+ // Repeated null and divide-by-zero check removal
+ public:
+ virtual bool zero_checked() const;
+ virtual void set_zero_checked(bool zero_checked);
+};
+
+class SharkNormalValue : public SharkValue {
+ friend class SharkValue;
+
+ protected:
+ SharkNormalValue(ciType* type, llvm::Value* value, bool zero_checked)
+ : _type(type), _llvm_value(value), _zero_checked(zero_checked) {}
+
+ private:
+ ciType* _type;
+ llvm::Value* _llvm_value;
+ bool _zero_checked;
+
+ private:
+ llvm::Value* llvm_value() const {
+ return _llvm_value;
+ }
+
+ // Cloning
+ public:
+ SharkValue* clone() const;
+
+ // Comparison
+ public:
+ bool equal_to(SharkValue* other) const;
+
+ // Type access
+ public:
+ ciType* type() const;
+ BasicType basic_type() const;
+ int size() const;
+
+ public:
+ bool is_jint() const;
+ bool is_jlong() const;
+ bool is_jfloat() const;
+ bool is_jdouble() const;
+ bool is_jobject() const;
+ bool is_jarray() const;
+
+ // Typed conversions to LLVM values
+ public:
+ llvm::Value* jint_value() const;
+ llvm::Value* jlong_value() const;
+ llvm::Value* jfloat_value() const;
+ llvm::Value* jdouble_value() const;
+ llvm::Value* jobject_value() const;
+ llvm::Value* jarray_value() const;
+
+ // Type-losing conversions, use with care
+ public:
+ llvm::Value* generic_value() const;
+ llvm::Value* intptr_value(SharkBuilder* builder) const;
+
+ // Phi-style stuff
+ public:
+ SharkValue* merge(SharkBuilder* builder,
+ SharkValue* other,
+ llvm::BasicBlock* other_block,
+ llvm::BasicBlock* this_block,
+ const char* name);
+
+ // Repeated null and divide-by-zero check removal
+ public:
+ bool zero_checked() const;
+ void set_zero_checked(bool zero_checked);
+};
+
+class SharkPHIValue : public SharkNormalValue {
+ friend class SharkValue;
+
+ protected:
+ SharkPHIValue(ciType* type, llvm::PHINode* phi, const SharkPHIValue *parent)
+ : SharkNormalValue(type, phi, parent && parent->zero_checked()),
+ _parent(parent),
+ _all_incomers_zero_checked(true) {}
+
+ private:
+ const SharkPHIValue* _parent;
+ bool _all_incomers_zero_checked;
+
+ private:
+ const SharkPHIValue* parent() const {
+ return _parent;
+ }
+ bool is_clone() const {
+ return parent() != NULL;
+ }
+
+ public:
+ bool all_incomers_zero_checked() const {
+ if (is_clone())
+ return parent()->all_incomers_zero_checked();
+
+ return _all_incomers_zero_checked;
+ }
+
+ // Cloning
+ public:
+ SharkValue* clone() const;
+
+ // Casting
+ public:
+ bool is_phi() const;
+ SharkPHIValue* as_phi();
+
+ // Phi-style stuff
+ public:
+ void addIncoming(SharkValue *value, llvm::BasicBlock* block);
+};
+
+class SharkAddressValue : public SharkValue {
+ friend class SharkValue;
+
+ protected:
+ SharkAddressValue(int bci)
+ : _bci(bci) {}
+
+ private:
+ int _bci;
+
+ // Cloning
+ public:
+ SharkValue* clone() const;
+
+ // Comparison
+ public:
+ bool equal_to(SharkValue* other) const;
+
+ // Type access
+ public:
+ BasicType basic_type() const;
+ int size() const;
+ bool is_address() const;
+
+ // Typed conversion from SharkValues
+ public:
+ int address_value() const;
+
+ // Type-losing conversion -- use with care!
+ public:
+ llvm::Value* generic_value() const;
+
+ // Phi-style stuff
+ public:
+ void addIncoming(SharkValue *value, llvm::BasicBlock* block);
+ SharkValue* merge(SharkBuilder* builder,
+ SharkValue* other,
+ llvm::BasicBlock* other_block,
+ llvm::BasicBlock* this_block,
+ const char* name);
+};
+
+// SharkValue methods that can't be declared above
+
+inline SharkValue* SharkValue::create_generic(ciType* type,
+ llvm::Value* value,
+ bool zero_checked) {
+ return new SharkNormalValue(type, value, zero_checked);
+}
+
+inline SharkValue* SharkValue::create_phi(ciType* type,
+ llvm::PHINode* phi,
+ const SharkPHIValue* parent) {
+ return new SharkPHIValue(type, phi, parent);
+}
+
+inline SharkValue* SharkValue::address_constant(int bci) {
+ return new SharkAddressValue(bci);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/shark_globals.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_shark_globals.cpp.incl"
+
+SHARK_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/shark/shark_globals.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#define SHARK_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+ \
+ product(intx, MaxNodeLimit, 65000, \
+ "Maximum number of nodes") \
+ \
+ /* inlining */ \
+ product(intx, SharkMaxInlineSize, 32, \
+ "Maximum bytecode size of methods to inline when using Shark") \
+ \
+ /* compiler debugging */ \
+ develop(ccstr, SharkPrintTypeflowOf, NULL, \
+ "Print the typeflow of the specified method") \
+ \
+ diagnostic(ccstr, SharkPrintBitcodeOf, NULL, \
+ "Print the LLVM bitcode of the specified method") \
+ \
+ diagnostic(ccstr, SharkPrintAsmOf, NULL, \
+ "Print the asm of the specified method") \
+ \
+ develop(bool, SharkTraceBytecodes, false, \
+ "Trace bytecode compilation") \
+ \
+ diagnostic(bool, SharkTraceInstalls, false, \
+ "Trace method installation") \
+ \
+ diagnostic(bool, SharkPerformanceWarnings, false, \
+ "Warn about things that could be made faster") \
+
+SHARK_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
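
Note: shark_globals.hpp and shark_globals.cpp together use the usual HotSpot flag X-macro pattern: the SHARK_FLAGS list is expanded once with the DECLARE_* macros to declare the flags and once with the MATERIALIZE_* macros to define them. The sketch below reproduces the pattern with simplified, hypothetical macro names (MY_FLAGS, DECLARE_FLAG, MATERIALIZE_FLAG) so the declare/materialize split can be seen in isolation.

#include <cstdio>

// The flag list, written once.
#define MY_FLAGS(product, diagnostic)                             \
  product(int,  MaxThings,   65000, "Maximum number of things")   \
  diagnostic(bool, TraceThings, false, "Trace thing creation")

// Expansion 1: declarations (this part would live in the header).
#define DECLARE_FLAG(type, name, value, doc) extern type name;
MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)
#undef DECLARE_FLAG

// Expansion 2: definitions (this part would live in exactly one .cpp file).
#define MATERIALIZE_FLAG(type, name, value, doc) type name = value;
MY_FLAGS(MATERIALIZE_FLAG, MATERIALIZE_FLAG)
#undef MATERIALIZE_FLAG

int main() {
  printf("MaxThings=%d TraceThings=%d\n", MaxThings, (int)TraceThings);
  return 0;
}
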
--- a/hotspot/src/share/vm/utilities/debug.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/debug.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -552,140 +552,6 @@
}
}
-
-static void find(intptr_t x, bool print_pc) {
- address addr = (address)x;
-
- CodeBlob* b = CodeCache::find_blob_unsafe(addr);
- if (b != NULL) {
- if (b->is_buffer_blob()) {
- // the interpreter is generated into a buffer blob
- InterpreterCodelet* i = Interpreter::codelet_containing(addr);
- if (i != NULL) {
- i->print();
- return;
- }
- if (Interpreter::contains(addr)) {
- tty->print_cr(INTPTR_FORMAT " is pointing into interpreter code (not bytecode specific)", addr);
- return;
- }
- //
- if (AdapterHandlerLibrary::contains(b)) {
- AdapterHandlerLibrary::print_handler(b);
- }
- // the stubroutines are generated into a buffer blob
- StubCodeDesc* d = StubCodeDesc::desc_for(addr);
- if (d != NULL) {
- d->print();
- if (print_pc) tty->cr();
- return;
- }
- if (StubRoutines::contains(addr)) {
- tty->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", addr);
- return;
- }
- // the InlineCacheBuffer is using stubs generated into a buffer blob
- if (InlineCacheBuffer::contains(addr)) {
- tty->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr);
- return;
- }
- VtableStub* v = VtableStubs::stub_containing(addr);
- if (v != NULL) {
- v->print();
- return;
- }
- }
- if (print_pc && b->is_nmethod()) {
- ResourceMark rm;
- tty->print("%#p: Compiled ", addr);
- ((nmethod*)b)->method()->print_value_on(tty);
- tty->print(" = (CodeBlob*)" INTPTR_FORMAT, b);
- tty->cr();
- return;
- }
- if ( b->is_nmethod()) {
- if (b->is_zombie()) {
- tty->print_cr(INTPTR_FORMAT " is zombie nmethod", b);
- } else if (b->is_not_entrant()) {
- tty->print_cr(INTPTR_FORMAT " is non-entrant nmethod", b);
- }
- }
- b->print();
- return;
- }
-
- if (Universe::heap()->is_in(addr)) {
- HeapWord* p = Universe::heap()->block_start(addr);
- bool print = false;
- // If we couldn't find it it just may mean that heap wasn't parseable
- // See if we were just given an oop directly
- if (p != NULL && Universe::heap()->block_is_obj(p)) {
- print = true;
- } else if (p == NULL && ((oopDesc*)addr)->is_oop()) {
- p = (HeapWord*) addr;
- print = true;
- }
- if (print) {
- oop(p)->print();
- if (p != (HeapWord*)x && oop(p)->is_constMethod() &&
- constMethodOop(p)->contains(addr)) {
- Thread *thread = Thread::current();
- HandleMark hm(thread);
- methodHandle mh (thread, constMethodOop(p)->method());
- if (!mh->is_native()) {
- tty->print_cr("bci_from(%p) = %d; print_codes():",
- addr, mh->bci_from(address(x)));
- mh->print_codes();
- }
- }
- return;
- }
- } else if (Universe::heap()->is_in_reserved(addr)) {
- tty->print_cr(INTPTR_FORMAT " is an unallocated location in the heap", addr);
- return;
- }
-
- if (JNIHandles::is_global_handle((jobject) addr)) {
- tty->print_cr(INTPTR_FORMAT " is a global jni handle", addr);
- return;
- }
- if (JNIHandles::is_weak_global_handle((jobject) addr)) {
- tty->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr);
- return;
- }
- if (JNIHandleBlock::any_contains((jobject) addr)) {
- tty->print_cr(INTPTR_FORMAT " is a local jni handle", addr);
- return;
- }
-
- for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
- // Check for privilege stack
- if (thread->privileged_stack_top() != NULL && thread->privileged_stack_top()->contains(addr)) {
- tty->print_cr(INTPTR_FORMAT " is pointing into the privilege stack for thread: " INTPTR_FORMAT, addr, thread);
- return;
- }
- // If the addr is a java thread print information about that.
- if (addr == (address)thread) {
- thread->print();
- return;
- }
- }
-
- // Try an OS specific find
- if (os::find(addr)) {
- return;
- }
-
- if (print_pc) {
- tty->print_cr(INTPTR_FORMAT ": probably in C++ code; check debugger", addr);
- Disassembler::decode(same_page(addr-40,addr),same_page(addr+40,addr));
- return;
- }
-
- tty->print_cr(INTPTR_FORMAT " is pointing to unknown location", addr);
-}
-
-
class LookForRefInGenClosure : public OopsInGenClosure {
public:
oop target;
@@ -767,7 +633,7 @@
// Can we someday rename the other find to hsfind?
extern "C" void hsfind(intptr_t x) {
Command c("hsfind");
- find(x, false);
+ os::print_location(tty, x, false);
}
@@ -778,13 +644,13 @@
extern "C" void find(intptr_t x) {
Command c("find");
- find(x, false);
+ os::print_location(tty, x, false);
}
extern "C" void findpc(intptr_t x) {
Command c("findpc");
- find(x, true);
+ os::print_location(tty, x, true);
}
--- a/hotspot/src/share/vm/utilities/exceptions.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -117,7 +117,7 @@
(address)h_exception(), file, line, thread);
}
// for AbortVMOnException flag
- NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
+ NOT_PRODUCT(Exceptions::debug_check_abort(h_exception, message));
// Check for special boot-strapping/vm-thread handling
if (special_exception(thread, file, line, h_exception)) return;
@@ -375,17 +375,26 @@
#ifndef PRODUCT
// caller frees value_string if necessary
-void Exceptions::debug_check_abort(const char *value_string) {
+void Exceptions::debug_check_abort(const char *value_string, const char* message) {
if (AbortVMOnException != NULL && value_string != NULL &&
strstr(value_string, AbortVMOnException)) {
- fatal(err_msg("Saw %s, aborting", value_string));
+ if (AbortVMOnExceptionMessage == NULL || message == NULL ||
+ strcmp(message, AbortVMOnExceptionMessage) == 0) {
+ fatal(err_msg("Saw %s, aborting", value_string));
+ }
}
}
-void Exceptions::debug_check_abort(Handle exception) {
+void Exceptions::debug_check_abort(Handle exception, const char* message) {
if (AbortVMOnException != NULL) {
ResourceMark rm;
- debug_check_abort(instanceKlass::cast(exception()->klass())->external_name());
+ if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
+ oop msg = java_lang_Throwable::message(exception);
+ if (msg != NULL) {
+ message = java_lang_String::as_utf8_string(msg);
+ }
+ }
+ debug_check_abort(instanceKlass::cast(exception()->klass())->external_name(), message);
}
}
#endif
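
Note: the change above extends AbortVMOnException with an optional AbortVMOnExceptionMessage filter. The VM now aborts only when the exception name matches and, if a message filter is set and the exception carries a message, that message matches exactly. A minimal stand-alone sketch of that matching rule, using plain C strings in place of the VM flags (the function name should_abort is hypothetical):

#include <cstring>
#include <cstdio>

// abort_on_exception / abort_on_message stand in for the AbortVMOnException
// and AbortVMOnExceptionMessage flags.
static bool should_abort(const char* exception_name, const char* message,
                         const char* abort_on_exception,
                         const char* abort_on_message) {
  if (abort_on_exception == NULL || exception_name == NULL) return false;
  if (strstr(exception_name, abort_on_exception) == NULL) return false;
  // Only require a message match when both a filter and a message exist.
  return abort_on_message == NULL || message == NULL ||
         strcmp(message, abort_on_message) == 0;
}

int main() {
  // Aborts: exception name and message both match.
  printf("%d\n", should_abort("java.lang.InternalError", "bad state",
                              "InternalError", "bad state"));
  // Does not abort: a message filter is set but does not match.
  printf("%d\n", should_abort("java.lang.InternalError", "something else",
                              "InternalError", "bad state"));
  return 0;
}
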
--- a/hotspot/src/share/vm/utilities/exceptions.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -143,8 +143,8 @@
static void throw_stack_overflow_exception(Thread* thread, const char* file, int line);
// for AbortVMOnException flag
- NOT_PRODUCT(static void debug_check_abort(Handle exception);)
- NOT_PRODUCT(static void debug_check_abort(const char *value_string);)
+ NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
+ NOT_PRODUCT(static void debug_check_abort(const char *value_string, const char* message = NULL);)
};
--- a/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions_gcc.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -216,8 +216,16 @@
#define DEBUG_EXCEPTION ::abort();
+#ifdef ARM
+#ifdef SOLARIS
+#define BREAKPOINT __asm__ volatile (".long 0xe1200070")
+#else
+#define BREAKPOINT __asm__ volatile (".long 0xe7f001f0")
+#endif
+#else
extern "C" void breakpoint();
#define BREAKPOINT ::breakpoint()
+#endif
// checking for nanness
#ifdef SOLARIS
@@ -235,6 +243,12 @@
#error "missing platform-specific definition here"
#endif
+// GCC 4.3 does not allow 0.0/0.0 to produce a NAN value
+#if (__GNUC__ == 4) && (__GNUC_MINOR__ > 2)
+#define CAN_USE_NAN_DEFINE 1
+#endif
+
+
// Checking for finiteness
inline int g_isfinite(jfloat f) { return finite(f); }
--- a/hotspot/src/share/vm/utilities/growableArray.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -97,7 +97,10 @@
assert(_len >= 0 && _len <= _max, "initial_len too big");
_arena = (c_heap ? (Arena*)1 : NULL);
set_nesting();
- assert(!c_heap || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+ assert(!on_C_heap() || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+ assert(!on_stack() ||
+ (allocated_on_res_area() || allocated_on_stack()),
+ "growable array must be on stack if elements are not on arena and not on C heap");
}
// This GA will use the given arena for storage.
@@ -108,6 +111,10 @@
assert(_len >= 0 && _len <= _max, "initial_len too big");
_arena = arena;
assert(on_arena(), "arena has taken on reserved value 0 or 1");
+ // Relax next assert to allow object allocation on resource area,
+ // on stack or embedded into another object.
+ assert(allocated_on_arena() || allocated_on_stack(),
+ "growable array must be on arena or on stack if elements are on arena");
}
void* raw_allocate(int elementSize);
--- a/hotspot/src/share/vm/utilities/macros.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/macros.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -151,9 +151,11 @@
#if defined(IA32) || defined(AMD64)
#define X86
#define X86_ONLY(code) code
+#define NOT_X86(code)
#else
#undef X86
#define X86_ONLY(code)
+#define NOT_X86(code) code
#endif
#ifdef IA32
@@ -188,4 +190,37 @@
#define NOT_SPARC(code) code
#endif
+#ifdef PPC
+#define PPC_ONLY(code) code
+#define NOT_PPC(code)
+#else
+#define PPC_ONLY(code)
+#define NOT_PPC(code) code
+#endif
+
+#ifdef E500V2
+#define E500V2_ONLY(code) code
+#define NOT_E500V2(code)
+#else
+#define E500V2_ONLY(code)
+#define NOT_E500V2(code) code
+#endif
+
+
+#ifdef ARM
+#define ARM_ONLY(code) code
+#define NOT_ARM(code)
+#else
+#define ARM_ONLY(code)
+#define NOT_ARM(code) code
+#endif
+
+#ifdef JAVASE_EMBEDDED
+#define EMBEDDED_ONLY(code) code
+#define NOT_EMBEDDED(code)
+#else
+#define EMBEDDED_ONLY(code)
+#define NOT_EMBEDDED(code) code
+#endif
+
#define define_pd_global(type, name, value) const type pd_##name = value;
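
Note: the new PPC/E500V2/ARM/EMBEDDED macro pairs follow the existing XXX_ONLY/NOT_XXX convention: one macro of each pair expands to its argument and the other to nothing, so a single statement can be compiled in or out per platform. A minimal sketch of how such a pair is used (the ARM define and the printf calls are illustrative only):

#include <cstdio>

#define ARM 1   // pretend this is the ARM port, for the sake of the sketch

#ifdef ARM
#define ARM_ONLY(code) code
#define NOT_ARM(code)
#else
#define ARM_ONLY(code)
#define NOT_ARM(code) code
#endif

int main() {
  ARM_ONLY(printf("ARM-specific path\n");)   // compiled in
  NOT_ARM(printf("generic path\n");)         // compiled out
  return 0;
}
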
--- a/hotspot/src/share/vm/utilities/vmError.cpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/vmError.cpp Fri Aug 13 10:55:42 2010 -0700
@@ -479,8 +479,8 @@
if (fr.sp()) {
st->print(", sp=" PTR_FORMAT, fr.sp());
- st->print(", free space=%" INTPTR_FORMAT "k",
- ((intptr_t)fr.sp() - (intptr_t)stack_bottom) >> 10);
+ size_t free_stack_size = pointer_delta(fr.sp(), stack_bottom, 1024);
+ st->print(", free space=" SIZE_FORMAT "k", free_stack_size);
}
st->cr();
@@ -687,16 +687,13 @@
# undef END
}
+VMError* volatile VMError::first_error = NULL;
+volatile jlong VMError::first_error_tid = -1;
void VMError::report_and_die() {
// Don't allocate large buffer on stack
static char buffer[O_BUFLEN];
- // First error, and its thread id. We must be able to handle native thread,
- // so use thread id instead of Thread* to identify thread.
- static VMError* first_error;
- static jlong first_error_tid;
-
// An error could happen before tty is initialized or after it has been
// destroyed. Here we use a very simple unbuffered fdStream for printing.
// Only out.print_raw() and out.print_raw_cr() should be used, as other
--- a/hotspot/src/share/vm/utilities/vmError.hpp Wed Jul 05 17:19:35 2017 +0200
+++ b/hotspot/src/share/vm/utilities/vmError.hpp Fri Aug 13 10:55:42 2010 -0700
@@ -57,6 +57,10 @@
int _current_step;
const char * _current_step_info;
int _verbose;
+ // First error, and its thread id. We must be able to handle native threads,
+ // so use the thread id instead of Thread* to identify the thread.
+ static VMError* volatile first_error;
+ static volatile jlong first_error_tid;
// used by reporting about OOM
size_t _size;
@@ -108,4 +112,7 @@
// returns original handler for signal, if it was resetted, or NULL if
// signal was not changed by error reporter
static address get_resetted_sighandler(int sig);
+
+ // check to see if fatal error reporting is in progress
+ static bool fatal_error_in_progress() { return first_error != NULL; }
};
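
Note: promoting first_error and first_error_tid from function-local statics in report_and_die() to static class members lets other code ask fatal_error_in_progress() whether an error is already being reported. The sketch below shows the underlying "first error wins" idiom with std::atomic standing in for the VM's own atomic operations; ErrorReport and the printf messages are illustrative, not the actual VMError logic.

#include <atomic>
#include <cstdio>

struct ErrorReport { int id; };

// The first reporter publishes itself; later errors see it and back off.
static std::atomic<ErrorReport*> first_error(NULL);

static bool fatal_error_in_progress() { return first_error.load() != NULL; }

static void report_and_die(ErrorReport* err) {
  ErrorReport* expected = NULL;
  if (first_error.compare_exchange_strong(expected, err)) {
    printf("reporting first error %d\n", err->id);
  } else {
    printf("error %d raised while error %d is already being reported\n",
           err->id, expected->id);
  }
}

int main() {
  ErrorReport a = { 1 };
  ErrorReport b = { 2 };
  report_and_die(&a);
  printf("fatal_error_in_progress() = %d\n", fatal_error_in_progress());
  report_and_die(&b);
  return 0;
}
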
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6973329/Test.java Fri Aug 13 10:55:42 2010 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 6973329
+ * @summary C2 with Zero based COOP produces code with broken anti-dependency on x86
+ *
+ * @run main/othervm -Xbatch -Xcomp -XX:CompileOnly=Test Test
+ */
+
+class A {
+ A next;
+ int n;
+
+ public int get_n() {
+ return n+1;
+ }
+}
+public class Test {
+
+ A a;
+
+ void test (A new_next) {
+ A prev_next = a.next;
+ a.next = new_next;
+ if (prev_next == null) {
+ a.n = a.get_n();
+ }
+ }
+
+ public static void main(String args[]) {
+ Test t = new Test();
+ t.a = new A();
+ t.a.n = 1;
+ t.test(new A());
+ if (t.a.n != 2) {
+ System.out.println("Wrong value: " + t.a.n + " expected: 2");
+ System.exit(97);
+ }
+ }
+}
+