--- a/.hgtags-top-repo Mon Mar 18 10:46:49 2013 -0400
+++ b/.hgtags-top-repo Wed Jul 05 18:45:01 2017 +0200
@@ -202,3 +202,4 @@
fd1a5574cf68af24bfd52decc37ac6361afb278a jdk8-b78
91d35211e74464dca5edf9b66ab01d0d0d8cded7 jdk8-b79
907a926d3c96472f357617b48b6b968ea855c23c jdk8-b80
+145dbc56f931c134e837b675b9e6e7bf08902e93 jdk8-b81
--- a/Makefile Mon Mar 18 10:46:49 2013 -0400
+++ b/Makefile Wed Jul 05 18:45:01 2017 +0200
@@ -90,6 +90,7 @@
include ./make/jaxp-rules.gmk
include ./make/jaxws-rules.gmk
include ./make/jdk-rules.gmk
+include ./make/nashorn-rules.gmk
include ./make/install-rules.gmk
include ./make/sponsors-rules.gmk
include ./make/deploy-rules.gmk
@@ -174,6 +175,11 @@
clobber:: jdk-clobber
endif
+ifeq ($(BUILD_NASHORN), true)
+ generic_build_repo_series:: $(NASHORN)
+ clobber:: nashorn-clobber
+endif
+
ifeq ($(BUILD_DEPLOY), true)
generic_build_repo_series:: $(DEPLOY)
clobber:: deploy-clobber
@@ -336,6 +342,7 @@
BUILD_HOTSPOT=false \
BUILD_JDK=false \
BUILD_LANGTOOLS=false \
+ BUILD_NASHORN=false \
BUILD_CORBA=false \
BUILD_JAXP=false \
BUILD_JAXWS=false \
--- a/common/autoconf/configure.ac Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/configure.ac Wed Jul 05 18:45:01 2017 +0200
@@ -194,6 +194,7 @@
###############################################################################
JDKOPT_SETUP_BUILD_TWEAKS
+JDKOPT_DETECT_INTREE_EC
###############################################################################
#
--- a/common/autoconf/generated-configure.sh Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/generated-configure.sh Wed Jul 05 18:45:01 2017 +0200
@@ -612,6 +612,7 @@
JOBS
MEMORY_SIZE
NUM_CORES
+ENABLE_INTREE_EC
SALIB_NAME
HOTSPOT_MAKE_ARGS
FIXPATH
@@ -749,6 +750,7 @@
OVERRIDE_SRC_ROOT
ADD_SRC_ROOT
JDK_TOPDIR
+NASHORN_TOPDIR
HOTSPOT_TOPDIR
JAXWS_TOPDIR
JAXP_TOPDIR
@@ -3751,7 +3753,7 @@
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1363150186
+DATE_WHEN_GENERATED=1363706268
###############################################################################
#
@@ -10785,6 +10787,12 @@
###############################################################################
#
+# Enable or disable the elliptic curve crypto implementation
+#
+
+
+###############################################################################
+#
# Compress jars
#
COMPRESS_JARS=false
@@ -15682,6 +15690,7 @@
JAXP_TOPDIR="$SRC_ROOT/jaxp"
JAXWS_TOPDIR="$SRC_ROOT/jaxws"
HOTSPOT_TOPDIR="$SRC_ROOT/hotspot"
+NASHORN_TOPDIR="$SRC_ROOT/nashorn"
JDK_TOPDIR="$SRC_ROOT/jdk"
@@ -15692,6 +15701,7 @@
+
###############################################################################
#
# Pickup additional source for a component from outside of the source root
@@ -15922,6 +15932,19 @@
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes with $HOTSPOT_TOPDIR" >&5
$as_echo "yes with $HOTSPOT_TOPDIR" >&6; }
fi
+if test "x$with_override_nashorn" != x; then
+ CURDIR="$PWD"
+ cd "$with_override_nashorn"
+ NASHORN_TOPDIR="`pwd`"
+ cd "$CURDIR"
+ if ! test -f $NASHORN_TOPDIR/makefiles/BuildNashorn.gmk; then
+ as_fn_error $? "You have to override nashorn with a full nashorn repo!" "$LINENO" 5
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if nashorn should be overridden" >&5
+$as_echo_n "checking if nashorn should be overridden... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes with $NASHORN_TOPDIR" >&5
+$as_echo "yes with $NASHORN_TOPDIR" >&6; }
+fi
if test "x$with_override_jdk" != x; then
CURDIR="$PWD"
cd "$with_override_jdk"
@@ -18534,14 +18557,18 @@
### Locate C compiler (CC)
-# gcc is almost always present, but on Windows we
-# prefer cl.exe and on Solaris we prefer CC.
-# Thus test for them in this order.
-if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- # Do not probe for cc on MacOSX.
- COMPILER_CHECK_LIST="cl gcc"
-else
- COMPILER_CHECK_LIST="cl cc gcc"
+# On windows, only cl.exe is supported.
+# On Solaris, cc is preferred to gcc.
+# Elsewhere, gcc is preferred to cc.
+
+if test "x$CC" != x; then
+ COMPILER_CHECK_LIST="$CC"
+elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+ COMPILER_CHECK_LIST="cl"
+elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
+ COMPILER_CHECK_LIST="cc gcc"
+else
+ COMPILER_CHECK_LIST="gcc cc"
fi
@@ -19505,7 +19532,7 @@
$as_echo "$as_me: Using $COMPILER_VENDOR $COMPILER_NAME compiler version $COMPILER_VERSION (located at $COMPILER)" >&6;}
-# Now that we have resolved CC ourself, let autoconf have it's go at it
+# Now that we have resolved CC ourself, let autoconf have its go at it
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -20107,12 +20134,16 @@
### Locate C++ compiler (CXX)
-if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- # Do not probe for CC on MacOSX.
- COMPILER_CHECK_LIST="cl g++"
-else
- COMPILER_CHECK_LIST="cl CC g++"
-fi
+if test "x$CXX" != x; then
+ COMPILER_CHECK_LIST="$CXX"
+elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+ COMPILER_CHECK_LIST="cl"
+elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
+ COMPILER_CHECK_LIST="CC g++"
+else
+ COMPILER_CHECK_LIST="g++ CC"
+fi
+
COMPILER_NAME=C++
@@ -21074,7 +21105,7 @@
$as_echo "$as_me: Using $COMPILER_VENDOR $COMPILER_NAME compiler version $COMPILER_VERSION (located at $COMPILER)" >&6;}
-# Now that we have resolved CXX ourself, let autoconf have it's go at it
+# Now that we have resolved CXX ourself, let autoconf have its go at it
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -29799,7 +29830,7 @@
_ACEOF
X11_A_OK=yes
else
- X11_A_OK=no
+ X11_A_OK=no; break
fi
done
@@ -31670,6 +31701,22 @@
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if elliptic curve crypto implementation is present" >&5
+$as_echo_n "checking if elliptic curve crypto implementation is present... " >&6; }
+
+if test -d "${SRC_ROOT}/jdk/src/share/native/sun/security/ec/impl"; then
+ ENABLE_INTREE_EC=yes
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ ENABLE_INTREE_EC=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+
###############################################################################
#
# Configure parts of the build that only affect the build performance,
--- a/common/autoconf/jdk-options.m4 Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/jdk-options.m4 Wed Jul 05 18:45:01 2017 +0200
@@ -366,6 +366,25 @@
###############################################################################
#
+# Enable or disable the elliptic curve crypto implementation
+#
+AC_DEFUN_ONCE([JDKOPT_DETECT_INTREE_EC],
+[
+AC_MSG_CHECKING([if elliptic curve crypto implementation is present])
+
+if test -d "${SRC_ROOT}/jdk/src/share/native/sun/security/ec/impl"; then
+ ENABLE_INTREE_EC=yes
+ AC_MSG_RESULT([yes])
+else
+ ENABLE_INTREE_EC=no
+ AC_MSG_RESULT([no])
+fi
+
+AC_SUBST(ENABLE_INTREE_EC)
+])
+
+###############################################################################
+#
# Compress jars
#
COMPRESS_JARS=false
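Note: the new JDKOPT_DETECT_INTREE_EC macro only records whether the in-tree ECC sources are present; downstream makefiles are expected to test the substituted ENABLE_INTREE_EC value (exported via spec.gmk.in below). A minimal sketch of such a consumer, assuming a hypothetical rule file for the SunEC native library:

  # Hypothetical fragment of a jdk makefile consuming the new variable.
  ifeq ($(ENABLE_INTREE_EC), yes)
    # In-tree sources under .../security/ec/impl exist; build the native library.
    BUILD_LIBSUNEC := true
  else
    # Sources are absent (e.g. a stripped source bundle); skip the native library.
    BUILD_LIBSUNEC := false
  endif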
--- a/common/autoconf/libraries.m4 Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/libraries.m4 Wed Jul 05 18:45:01 2017 +0200
@@ -182,7 +182,7 @@
# Need to include Xlib.h and Xutil.h to avoid "present but cannot be compiled" warnings on Solaris 10
AC_CHECK_HEADERS([X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h],
[X11_A_OK=yes],
- [X11_A_OK=no],
+ [X11_A_OK=no; break],
[ # include <X11/Xlib.h>
# include <X11/Xutil.h>
])
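The added break matters because AC_CHECK_HEADERS runs its action arguments once per header: without it, a later header that is found would flip X11_A_OK back to yes even though an earlier one was missing. A rough shell sketch of the generated loop (check_header stands in for the generated compile test and is not a real function):

  X11_A_OK=yes
  for hdr in X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h; do
    if check_header "$hdr"; then
      X11_A_OK=yes
    else
      X11_A_OK=no; break   # one missing header decides the outcome
    fi
  done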
--- a/common/autoconf/source-dirs.m4 Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/source-dirs.m4 Wed Jul 05 18:45:01 2017 +0200
@@ -33,12 +33,14 @@
JAXP_TOPDIR="$SRC_ROOT/jaxp"
JAXWS_TOPDIR="$SRC_ROOT/jaxws"
HOTSPOT_TOPDIR="$SRC_ROOT/hotspot"
+NASHORN_TOPDIR="$SRC_ROOT/nashorn"
JDK_TOPDIR="$SRC_ROOT/jdk"
AC_SUBST(LANGTOOLS_TOPDIR)
AC_SUBST(CORBA_TOPDIR)
AC_SUBST(JAXP_TOPDIR)
AC_SUBST(JAXWS_TOPDIR)
AC_SUBST(HOTSPOT_TOPDIR)
+AC_SUBST(NASHORN_TOPDIR)
AC_SUBST(JDK_TOPDIR)
])
@@ -233,7 +235,18 @@
fi
AC_MSG_CHECKING([if hotspot should be overridden])
AC_MSG_RESULT([yes with $HOTSPOT_TOPDIR])
-fi
+fi
+if test "x$with_override_nashorn" != x; then
+ CURDIR="$PWD"
+ cd "$with_override_nashorn"
+ NASHORN_TOPDIR="`pwd`"
+ cd "$CURDIR"
+ if ! test -f $NASHORN_TOPDIR/makefiles/BuildNashorn.gmk; then
+ AC_MSG_ERROR([You have to override nashorn with a full nashorn repo!])
+ fi
+ AC_MSG_CHECKING([if nashorn should be overridden])
+ AC_MSG_RESULT([yes with $NASHORN_TOPDIR])
+fi
if test "x$with_override_jdk" != x; then
CURDIR="$PWD"
cd "$with_override_jdk"
--- a/common/autoconf/spec.gmk.in Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/spec.gmk.in Wed Jul 05 18:45:01 2017 +0200
@@ -141,6 +141,7 @@
JAXP_TOPDIR:=@JAXP_TOPDIR@
JAXWS_TOPDIR:=@JAXWS_TOPDIR@
HOTSPOT_TOPDIR:=@HOTSPOT_TOPDIR@
+NASHORN_TOPDIR:=@NASHORN_TOPDIR@
COPYRIGHT_YEAR:=@COPYRIGHT_YEAR@
# Location where build customization files may be found
@@ -230,6 +231,7 @@
JAXWS_OUTPUTDIR=$(BUILD_OUTPUT)/jaxws
HOTSPOT_OUTPUTDIR=$(BUILD_OUTPUT)/hotspot
JDK_OUTPUTDIR=$(BUILD_OUTPUT)/jdk
+NASHORN_OUTPUTDIR=$(BUILD_OUTPUT)/nashorn
IMAGES_OUTPUTDIR=$(BUILD_OUTPUT)/images
JCE_OUTPUTDIR=$(BUILD_OUTPUT)/jce-release
@@ -238,6 +240,7 @@
JAXP_DIST=$(JAXP_OUTPUTDIR)/dist
JAXWS_DIST=$(JAXWS_OUTPUTDIR)/dist
HOTSPOT_DIST=@HOTSPOT_DIST@
+NASHORN_DIST=$(NASHORN_OUTPUTDIR)/dist
BUILD_HOTSPOT=@BUILD_HOTSPOT@
@@ -536,6 +539,7 @@
# Build setup
ENABLE_JFR=@ENABLE_JFR@
+ENABLE_INTREE_EC=@ENABLE_INTREE_EC@
USE_EXTERNAL_LIBJPEG:=@USE_EXTERNAL_LIBJPEG@
USE_EXTERNAL_LIBGIF:=@USE_EXTERNAL_LIBGIF@
USE_EXTERNAL_LIBZ:=@USE_EXTERNAL_LIBZ@
--- a/common/autoconf/toolchain.m4 Mon Mar 18 10:46:49 2013 -0400
+++ b/common/autoconf/toolchain.m4 Wed Jul 05 18:45:01 2017 +0200
@@ -249,30 +249,38 @@
### Locate C compiler (CC)
-# gcc is almost always present, but on Windows we
-# prefer cl.exe and on Solaris we prefer CC.
-# Thus test for them in this order.
-if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- # Do not probe for cc on MacOSX.
- COMPILER_CHECK_LIST="cl gcc"
+# On windows, only cl.exe is supported.
+# On Solaris, cc is preferred to gcc.
+# Elsewhere, gcc is preferred to cc.
+
+if test "x$CC" != x; then
+ COMPILER_CHECK_LIST="$CC"
+elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+ COMPILER_CHECK_LIST="cl"
+elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
+ COMPILER_CHECK_LIST="cc gcc"
else
- COMPILER_CHECK_LIST="cl cc gcc"
+ COMPILER_CHECK_LIST="gcc cc"
fi
TOOLCHAIN_FIND_COMPILER([CC],[C],[$COMPILER_CHECK_LIST])
-# Now that we have resolved CC ourself, let autoconf have it's go at it
+# Now that we have resolved CC ourself, let autoconf have its go at it
AC_PROG_CC([$CC])
### Locate C++ compiler (CXX)
-if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- # Do not probe for CC on MacOSX.
- COMPILER_CHECK_LIST="cl g++"
+if test "x$CXX" != x; then
+ COMPILER_CHECK_LIST="$CXX"
+elif test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+ COMPILER_CHECK_LIST="cl"
+elif test "x$OPENJDK_TARGET_OS" = "xsolaris"; then
+ COMPILER_CHECK_LIST="CC g++"
else
- COMPILER_CHECK_LIST="cl CC g++"
+ COMPILER_CHECK_LIST="g++ CC"
fi
+
TOOLCHAIN_FIND_COMPILER([CXX],[C++],[$COMPILER_CHECK_LIST])
-# Now that we have resolved CXX ourself, let autoconf have it's go at it
+# Now that we have resolved CXX ourself, let autoconf have its go at it
AC_PROG_CXX([$CXX])
### Locate other tools
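With this change an explicit CC/CXX from the environment short-circuits the probe list entirely, and macosx no longer gets a special case (it falls into the gcc-first branch). For example, assuming a locally installed toolchain path:

  # Force a specific toolchain; TOOLCHAIN_FIND_COMPILER then only checks these binaries.
  CC=/opt/gcc-4.7.2/bin/gcc CXX=/opt/gcc-4.7.2/bin/g++ bash ./configure
  # With CC/CXX unset, the probe order is: windows -> cl; solaris -> cc gcc; elsewhere -> gcc cc.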
--- a/common/bin/hgforest.sh Mon Mar 18 10:46:49 2013 -0400
+++ b/common/bin/hgforest.sh Wed Jul 05 18:45:01 2017 +0200
@@ -96,7 +96,7 @@
repos=""
repos_extra=""
if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
- subrepos="corba jaxp jaxws langtools jdk hotspot"
+ subrepos="corba jaxp jaxws langtools jdk hotspot nashorn"
if [ -f .hg/hgrc ] ; then
pull_default=`hg paths default`
if [ "${pull_default}" = "" ] ; then
--- a/common/makefiles/Main.gmk Mon Mar 18 10:46:49 2013 -0400
+++ b/common/makefiles/Main.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -120,6 +120,12 @@
@($(CD) $(JDK_TOPDIR)/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) $(MAKE_ARGS) -f BuildJdk.gmk $(JDK_TARGET))
@$(call TargetExit)
+nashorn: jdk nashorn-only
+nashorn-only: start-make
+ @$(call TargetEnter)
+ @($(CD) $(NASHORN_TOPDIR)/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) $(MAKE_ARGS) -f BuildNashorn.gmk)
+ @$(call TargetExit)
+
demos: jdk demos-only
demos-only: start-make
@$(call TargetEnter)
@@ -128,7 +134,7 @@
# Note: This double-colon rule is intentional, to support
# custom make file integration.
-images:: source-tips demos images-only
+images:: source-tips demos nashorn images-only
images-only: start-make
@$(call TargetEnter)
@($(CD) $(JDK_TOPDIR)/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) $(MAKE_ARGS) -f BuildJdk.gmk images)
@@ -175,9 +181,10 @@
@$(ECHO) Boot cycle build step 2: Building a new JDK image using previously built image
@($(CD) $(SRC_ROOT)/common/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) SPEC=$(dir $(SPEC))bootcycle-spec.gmk images)
-test: start-make
+test: images test-only
+test-only: start-make
@$(call TargetEnter)
- @($(CD) $(SRC_ROOT)/test && $(BUILD_LOG_WRAPPER) $(MAKE) -j1 -k MAKEFLAGS= PRODUCT_HOME=$(OUTPUT_ROOT)/jdk JPRT_JAVA_HOME=$(OUTPUT_ROOT)/jdk ALT_OUTPUTDIR=$(OUTPUT_ROOT) $(TEST)) || true
+ @($(CD) $(SRC_ROOT)/test && $(BUILD_LOG_WRAPPER) $(MAKE) -j1 -k MAKEFLAGS= PRODUCT_HOME=$(JDK_IMAGE_DIR) JPRT_JAVA_HOME=$(JDK_IMAGE_DIR) ALT_OUTPUTDIR=$(OUTPUT_ROOT) $(TEST)) || true
@$(call TargetExit)
# Stores the tips for each repository. This file is used when constructing the jdk image and can be
@@ -190,7 +197,7 @@
# Remove everything, except the output from configure.
-clean: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-images clean-overlay-images clean-bootcycle-build clean-docs
+clean: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-nashorn clean-images clean-overlay-images clean-bootcycle-build clean-docs
@($(CD) $(OUTPUT_ROOT) && $(RM) -r tmp source_tips build.log* build-trace*.log*)
@$(ECHO) Cleaned all build artifacts.
@@ -220,6 +227,8 @@
$(call CleanComponent,hotspot)
clean-jdk:
$(call CleanComponent,jdk)
+clean-nashorn:
+ $(call CleanComponent,nashorn)
clean-images:
$(call CleanComponent,images)
clean-overlay-images:
@@ -230,10 +239,10 @@
$(call CleanComponent,docs)
$(call CleanComponent,docstemp)
-.PHONY: langtools corba jaxp jaxws hotspot jdk images overlay-images install
-.PHONY: langtools-only corba-only jaxp-only jaxws-only hotspot-only jdk-only images-only overlay-images-only install-only
+.PHONY: langtools corba jaxp jaxws hotspot jdk nashorn images overlay-images install
+.PHONY: langtools-only corba-only jaxp-only jaxws-only hotspot-only jdk-only nashorn-only images-only overlay-images-only install-only
.PHONY: all test clean dist-clean bootcycle-images start-make
-.PHONY: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-images clean-overlay-images clean-bootcycle-build
+.PHONY: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-nashorn clean-images clean-overlay-images clean-bootcycle-build
.PHONY: profiles profiles-only profiles-oscheck
FRC: # Force target
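The new targets follow the existing <component> / <component>-only convention, images:: now builds nashorn before assembling the images, and test runs against the built image rather than the raw jdk output directory. Typical invocations from the configured build directory (the TEST value is only illustrative):

  make nashorn            # builds jdk first, then runs BuildNashorn.gmk
  make nashorn-only       # nashorn alone, assuming jdk is already built
  make clean-nashorn      # removes the nashorn component from the output directory
  make test TEST=jdk_lang # builds images, then runs tests with PRODUCT_HOME=$(JDK_IMAGE_DIR)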
--- a/common/makefiles/MakeBase.gmk Mon Mar 18 10:46:49 2013 -0400
+++ b/common/makefiles/MakeBase.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -51,8 +51,9 @@
-e 's|X98|$(OUTPUT_ROOT)|g' -e 's|X97|$(SRC_ROOT)|g' \
-e 's|X00|X|g' | tr '\n' '$2'
+# Subst in an extra $ to prevent it from disappearing.
define ListPathsSafely_If
- $(if $(word $3,$($1)),$(eval $1_LPS$3:=$(call compress_paths,$(wordlist $3,$4,$($1)))))
+ $(if $(word $3,$($1)),$(eval $1_LPS$3:=$(call compress_paths,$(subst $$,$$$$,$(wordlist $3,$4,$($1))))))
endef
define ListPathsSafely_Printf
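The extra subst protects literal $ characters in path lists: $(eval ...) expands its argument once more before parsing it, so an unescaped $ would be consumed at that point. A self-contained sketch of the effect (variable names are illustrative; runs with any GNU make):

  # A value containing a literal dollar sign ($$ at read time, stored as "lib/a$b.jar").
  PATH_WITH_DOLLAR := lib/a$$b.jar

  # eval expands its argument once more before parsing, so "$b" is expanded away here:
  $(eval LOST := $(PATH_WITH_DOLLAR))
  # Doubling every $ first (as ListPathsSafely_If now does) survives that extra expansion:
  $(eval KEPT := $(subst $$,$$$$,$(PATH_WITH_DOLLAR)))

  $(info lost: $(LOST))
  $(info kept: $(KEPT))

  all: ;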
--- a/common/makefiles/javadoc/NON_CORE_PKGS.gmk Mon Mar 18 10:46:49 2013 -0400
+++ b/common/makefiles/javadoc/NON_CORE_PKGS.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -80,7 +80,8 @@
TREEAPI_PKGS = com.sun.source.doctree \
com.sun.source.tree \
- com.sun.source.util
+ com.sun.source.util \
+ jdk
SMARTCARDIO_PKGS = javax.smartcardio
@@ -93,6 +94,8 @@
com.apple.eio
endif
+JDK_PKGS = jdk
+
# non-core packages in rt.jar
NON_CORE_PKGS = $(DOMAPI_PKGS) \
$(MGMT_PKGS) \
@@ -103,5 +106,5 @@
$(HTTPSERVER_PKGS) \
$(SMARTCARDIO_PKGS) \
$(SCTPAPI_PKGS) \
- $(APPLE_EXT_PKGS)
-
+ $(APPLE_EXT_PKGS) \
+ $(JDK_PKGS)
--- a/hotspot/.hgtags Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/.hgtags Wed Jul 05 18:45:01 2017 +0200
@@ -322,3 +322,5 @@
df5396524152118535c36da5801d828b560d19a2 hs25-b21
4a198b201f3ce84433fa94a3ca65d061473e7c4c jdk8-b80
dd6350b4abc4a6c19c89dd982cc0e4f3d119885c hs25-b22
+65b797426a3bec6e91b64085a0cfb94adadb634a jdk8-b81
+0631ebcc45f05c73b09a56c2586685af1f781c1d hs25-b23
--- a/hotspot/agent/src/os/linux/ps_core.c Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/agent/src/os/linux/ps_core.c Wed Jul 05 18:45:01 2017 +0200
@@ -132,12 +132,12 @@
}
// Part of the class sharing workaround
-static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
+static void add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
uintptr_t vaddr, size_t memsz) {
map_info* map;
if ((map = allocate_init_map(ph->core->classes_jsa_fd,
offset, vaddr, memsz)) == NULL) {
- return NULL;
+ return;
}
map->next = ph->core->class_share_maps;
--- a/hotspot/make/bsd/makefiles/gcc.make Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/make/bsd/makefiles/gcc.make Wed Jul 05 18:45:01 2017 +0200
@@ -168,12 +168,12 @@
# conversions which might affect the values. To avoid that, we need to turn
# it off explicitly.
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-ACCEPTABLE_WARNINGS = -Wpointer-arith -Wsign-compare
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
else
-ACCEPTABLE_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare
+WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
endif
-CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS)
+CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
# XXXDARWIN: for _dyld_bind_fully_image_containing_address
--- a/hotspot/make/excludeSrc.make Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/make/excludeSrc.make Wed Jul 05 18:45:01 2017 +0200
@@ -69,7 +69,7 @@
CXXFLAGS += -DINCLUDE_CDS=0
CFLAGS += -DINCLUDE_CDS=0
- Src_Files_EXCLUDE += metaspaceShared.cpp
+ Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
endif
ifeq ($(INCLUDE_ALL_GCS), false)
--- a/hotspot/make/hotspot_version Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/make/hotspot_version Wed Jul 05 18:45:01 2017 +0200
@@ -35,7 +35,7 @@
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=22
+HS_BUILD_NUMBER=23
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- a/hotspot/make/linux/makefiles/gcc.make Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/make/linux/makefiles/gcc.make Wed Jul 05 18:45:01 2017 +0200
@@ -131,12 +131,12 @@
# conversions which might affect the values. To avoid that, we need to turn
# it off explicitly.
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
-ACCEPTABLE_WARNINGS = -Wpointer-arith -Wsign-compare
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
else
-ACCEPTABLE_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare
+WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
endif
-CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS)
+CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
--- a/hotspot/make/solaris/makefiles/gcc.make Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/make/solaris/makefiles/gcc.make Wed Jul 05 18:45:01 2017 +0200
@@ -118,8 +118,8 @@
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
# Enable these warnings. See 'info gcc' about details on these options
-ADDITIONAL_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare
-CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ADDITIONAL_WARNINGS)
+WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
+CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
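Adding -Wundef (here and in the bsd/linux gcc.make hunks above) makes the preprocessor warn whenever an undefined macro is evaluated in an #if, which is presumably why the '#if ASSERT' tests later in this patch become '#ifdef ASSERT'. A minimal illustration (file and function names are made up):

  /* wundef_demo.c -- compile with: gcc -Wundef -c wundef_demo.c */

  #if ASSERT                 /* warning: "ASSERT" is not defined [-Wundef]; treated as 0 */
  void debug_only_helper(void) {}
  #endif

  #ifdef ASSERT              /* #ifdef merely tests definedness, so -Wundef stays quiet */
  void another_debug_helper(void) {}
  #endif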
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -2194,7 +2194,8 @@
int callee_locals_size,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
assert(popframe_extra_args == 0, "NEED TO FIX");
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1581,7 +1581,8 @@
int callee_local_count,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in InterpreterGenerator::generate_fixed_frame.
// If f!=NULL, set up the following variables:
@@ -1664,6 +1665,15 @@
int delta = local_words - parm_words;
int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
+ if (!is_bottom_frame) {
+ // Llast_SP is set below for the current frame to SP (with the
+ // extra space for the callee's locals). Here we adjust
+ // Llast_SP for the caller's frame, removing the extra space
+ // for the current method's locals.
+ *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
+ } else {
+ assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
+ }
} else {
assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
// Don't have Lesp available; lay out locals block in the caller
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -2361,7 +2361,8 @@
int callee_locals,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
assert(popframe_extra_args == 0, "FIX ME");
// NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -356,7 +356,7 @@
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
-#if ASSERT
+#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
frame fr;
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -170,7 +170,7 @@
return (intptr_t*) addr_at(offset);
}
-#if ASSERT
+#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1585,7 +1585,8 @@
int callee_locals,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1599,7 +1599,8 @@
int callee_locals,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -919,7 +919,8 @@
int callee_locals,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
assert(popframe_extra_args == 0, "what to do?");
assert(!is_top_frame || (!callee_locals && !callee_param_count),
"top frame should have no caller");
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -57,6 +57,7 @@
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
+#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
@@ -2275,13 +2276,25 @@
return NULL;
}
+ // The memory is committed
+ address pc = CALLER_PC;
+ MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
+ MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
+
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
- return rslt == 0;
+ if (rslt == 0) {
+ MemTracker::record_virtual_memory_uncommit((address)base, bytes);
+ MemTracker::record_virtual_memory_release((address)base, bytes);
+ return true;
+ } else {
+ return false;
+ }
+
}
size_t os::large_page_size() {
--- a/hotspot/src/os/linux/vm/globals_linux.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os/linux/vm/globals_linux.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,9 @@
product(bool, UseHugeTLBFS, false, \
"Use MAP_HUGETLB for large pages") \
\
+ product(bool, LoadExecStackDllInVMThread, true, \
+ "Load DLLs with executable-stack attribute in the VM Thread") \
+ \
product(bool, UseSHM, false, \
"Use SYSV shared memory for large pages")
--- a/hotspot/src/os/linux/vm/os_linux.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -44,6 +44,7 @@
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
@@ -57,10 +58,12 @@
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
+#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
+#include "utilities/elfFile.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
@@ -1796,9 +1799,93 @@
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
+
+// Remember the stack's state. The Linux dynamic linker will change
+// the stack to 'executable' at most once, so we must safepoint only once.
+bool os::Linux::_stack_is_executable = false;
+
+// VM operation that loads a library. This is necessary if stack protection
+// of the Java stacks can be lost while loading the library. If we
+// do not stop the Java threads, they can stack overflow before the stacks
+// are protected again.
+class VM_LinuxDllLoad: public VM_Operation {
+ private:
+ const char *_filename;
+ void *_lib;
+ public:
+ VM_LinuxDllLoad(const char *fn) :
+ _filename(fn), _lib(NULL) {}
+ VMOp_Type type() const { return VMOp_LinuxDllLoad; }
+ void doit() {
+ _lib = os::Linux::dll_load_inner(_filename);
+ os::Linux::_stack_is_executable = true;
+ }
+ void* loaded_library() { return _lib; }
+};
+
void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
- void * result= ::dlopen(filename, RTLD_LAZY);
+ void * result = NULL;
+ bool load_attempted = false;
+
+ // Check whether the library to load might change execution rights
+ // of the stack. If they are changed, the protection of the stack
+ // guard pages will be lost. We need a safepoint to fix this.
+ //
+ // See Linux man page execstack(8) for more info.
+ if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
+ ElfFile ef(filename);
+ if (!ef.specifies_noexecstack()) {
+ if (!is_init_completed()) {
+ os::Linux::_stack_is_executable = true;
+ // This is OK - No Java threads have been created yet, and hence no
+ // stack guard pages to fix.
+ //
+ // This should happen only when you are building JDK7 using a very
+ // old version of JDK6 (e.g., with JPRT) and running test_gamma.
+ //
+ // Dynamic loader will make all stacks executable after
+ // this function returns, and will not do that again.
+ assert(Threads::first() == NULL, "no Java threads should exist yet.");
+ } else {
+ warning("You have loaded library %s which might have disabled stack guard. "
+ "The VM will try to fix the stack guard now.\n"
+ "It's highly recommended that you fix the library with "
+ "'execstack -c <libfile>', or link it with '-z noexecstack'.",
+ filename);
+
+ assert(Thread::current()->is_Java_thread(), "must be Java thread");
+ JavaThread *jt = JavaThread::current();
+ if (jt->thread_state() != _thread_in_native) {
+ // This happens when a compiler thread tries to load a hsdis-<arch>.so file
+ // that requires ExecStack. Cannot enter safe point. Let's give up.
+ warning("Unable to fix stack guard. Giving up.");
+ } else {
+ if (!LoadExecStackDllInVMThread) {
+ // This is for the case where the DLL has a static
+ // constructor function that executes JNI code. We cannot
+ // load such DLLs in the VMThread.
+ result = ::dlopen(filename, RTLD_LAZY);
+ }
+
+ ThreadInVMfromNative tiv(jt);
+ debug_only(VMNativeEntryWrapper vew;)
+
+ VM_LinuxDllLoad op(filename);
+ VMThread::execute(&op);
+ if (LoadExecStackDllInVMThread) {
+ result = op.loaded_library();
+ }
+ load_attempted = true;
+ }
+ }
+ }
+ }
+
+ if (!load_attempted) {
+ result = ::dlopen(filename, RTLD_LAZY);
+ }
+
if (result != NULL) {
// Successful loading
return result;
@@ -1952,6 +2039,38 @@
return NULL;
}
+void * os::Linux::dll_load_inner(const char *filename) {
+ void * result = NULL;
+ if (LoadExecStackDllInVMThread) {
+ result = ::dlopen(filename, RTLD_LAZY);
+ }
+
+ // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
+ // library that requires an executable stack, or which does not have this
+ // stack attribute set, dlopen changes the stack attribute to executable. The
+ // read protection of the guard pages gets lost.
+ //
+ // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
+ // operations may have been queued at the same time.
+
+ if (!_stack_is_executable) {
+ JavaThread *jt = Threads::first();
+
+ while (jt) {
+ if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized
+ jt->stack_yellow_zone_enabled()) { // No pending stack overflow exceptions
+ if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
+ jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
+ warning("Attempt to reguard stack yellow zone failed.");
+ }
+ }
+ jt = jt->next();
+ }
+ }
+
+ return result;
+}
+
/*
* glibc-2.0 libdl is not MT safe. If you are building with any glibc,
* chances are you might want to run the generated bits against glibc-2.0
@@ -3094,13 +3213,24 @@
numa_make_global(addr, bytes);
}
+ // The memory is committed
+ address pc = CALLER_PC;
+ MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
+ MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
+
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
- return rslt == 0;
+ if (rslt == 0) {
+ MemTracker::record_virtual_memory_uncommit((address)base, bytes);
+ MemTracker::record_virtual_memory_release((address)base, bytes);
+ return true;
+ } else {
+ return false;
+ }
}
size_t os::large_page_size() {
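The warning added in os::dll_load points users at execstack(8); the stack-execution attribute it refers to is the PT_GNU_STACK program header of the offending .so. A quick way to inspect and clear it (the library name is a placeholder):

  # RW  = non-executable stack requested; RWE = executable stack (triggers the new safepoint path)
  readelf -lW libfoo.so | grep GNU_STACK

  execstack -q libfoo.so    # '-' means the flag is clear, 'X' means it is set
  execstack -c libfoo.so    # clear the flag, as suggested by the VM warning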
--- a/hotspot/src/os/linux/vm/os_linux.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -94,6 +94,9 @@
static void print_libversion_info(outputStream* st);
public:
+ static bool _stack_is_executable;
+ static void *dll_load_inner(const char *name);
+
static void init_thread_fpu_state();
static int get_fpu_control_word();
static void set_fpu_control_word(int fpu_control);
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -2945,7 +2945,7 @@
while (p < (uint64_t)end) {
addrs[0] = p;
size_t addrs_count = 1;
- while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
+ while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
addrs_count++;
}
@@ -3420,13 +3420,25 @@
if ((retAddr != NULL) && UseNUMAInterleaving) {
numa_make_global(retAddr, size);
}
+
+ // The memory is committed
+ address pc = CALLER_PC;
+ MemTracker::record_virtual_memory_reserve((address)retAddr, size, pc);
+ MemTracker::record_virtual_memory_commit((address)retAddr, size, pc);
+
return retAddr;
}
bool os::release_memory_special(char* base, size_t bytes) {
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
- return rslt == 0;
+ if (rslt == 0) {
+ MemTracker::record_virtual_memory_uncommit((address)base, bytes);
+ MemTracker::record_virtual_memory_release((address)base, bytes);
+ return true;
+ } else {
+ return false;
+ }
}
size_t os::large_page_size() {
--- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -60,6 +60,7 @@
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
+#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
@@ -2836,7 +2837,7 @@
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
-
+ MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);
// we still need to round up to a page boundary (in case we are using large pages)
@@ -2898,6 +2899,11 @@
if (next_alloc_addr > p_buf) {
// Some memory was committed so release it.
size_t bytes_to_release = bytes - bytes_remaining;
+ // NMT has yet to record any individual blocks, so it
+ // needs to create a dummy 'reserve' record to match
+ // the release.
+ MemTracker::record_virtual_memory_reserve((address)p_buf,
+ bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@@ -2909,10 +2915,19 @@
#endif
return NULL;
}
+
bytes_remaining -= bytes_to_rq;
next_alloc_addr += bytes_to_rq;
count++;
}
+ // Although the memory is allocated individually, it is returned as one.
+ // NMT records it as one block.
+ address pc = CALLER_PC;
+ MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
+ if ((flags & MEM_COMMIT) != 0) {
+ MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
+ }
+
// made it this far, success
return p_buf;
}
@@ -3099,11 +3114,20 @@
// normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
+ if (res != NULL) {
+ address pc = CALLER_PC;
+ MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
+ MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
+ }
+
return res;
}
}
bool os::release_memory_special(char* base, size_t bytes) {
+ assert(base != NULL, "Sanity check");
+ // Memory allocated via reserve_memory_special() is committed
+ MemTracker::record_virtual_memory_uncommit((address)base, bytes);
return release_memory(base, bytes);
}
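These MemTracker calls make large-page mappings visible to Native Memory Tracking in the bsd, linux, solaris and windows code above: reserve/commit are recorded when the special memory is handed out, and it is uncommitted/released again when returned. One way to observe the effect, with a placeholder application and pid:

  # Start with NMT enabled and large pages in use (MyApp is a placeholder).
  java -XX:NativeMemoryTracking=summary -XX:+UseLargePages MyApp

  # The reserved/committed figures now include os::reserve_memory_special allocations.
  jcmd <pid> VM.native_memory summary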
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -516,7 +516,7 @@
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
- nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+ nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
}
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -410,6 +410,11 @@
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+
+ // This is a likely cause, but hard to verify. Let's just print
+ // it as a hint.
+ tty->print_raw_cr("Please check if any of your loaded .so files has "
+ "enabled executable stack (see man page execstack(8))");
} else {
// Accessing stack address below sp may cause SEGV if current
// thread has MAP_GROWSDOWN stack. This should only happen when
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -305,6 +305,11 @@
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+
+ // This is a likely cause, but hard to verify. Let's just print
+ // it as a hint.
+ tty->print_raw_cr("Please check if any of your loaded .so files has "
+ "enabled executable stack (see man page execstack(8))");
} else {
// Accessing stack address below sp may cause SEGV if current
// thread has MAP_GROWSDOWN stack. This should only happen when
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -2375,7 +2375,7 @@
// collects all register operands of the instruction
void visit(LIR_Op* op);
-#if ASSERT
+#ifdef ASSERT
// check that an operation has no operands
bool no_operands(LIR_Op* op);
#endif
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -2540,7 +2540,7 @@
} else if (innermost->head() == blk) {
// If loop header, complete the tree pointers
if (blk->loop() != innermost) {
-#if ASSERT
+#ifdef ASSERT
assert(blk->loop()->head() == innermost->head(), "same head");
Loop* dl;
for (dl = innermost; dl != NULL && dl != blk->loop(); dl = dl->parent());
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -90,8 +90,7 @@
// Extension method support.
#define JAVA_8_VERSION 52
-
-void ClassFileParser::parse_constant_pool_entries(ClassLoaderData* loader_data, constantPoolHandle cp, int length, TRAPS) {
+void ClassFileParser::parse_constant_pool_entries(int length, TRAPS) {
// Use a local copy of ClassFileStream. It helps the C++ compiler to optimize
// this function (_current can be allocated in a register, with scalar
// replacement of aggregates). The _current pointer is copied back to
@@ -104,7 +103,7 @@
assert(cfs->allocated_on_stack(),"should be local");
u1* old_current = cfs0->current();
#endif
- Handle class_loader(THREAD, loader_data->class_loader());
+ Handle class_loader(THREAD, _loader_data->class_loader());
// Used for batching symbol allocations.
const char* names[SymbolTable::symbol_alloc_batch_size];
@@ -124,7 +123,7 @@
{
cfs->guarantee_more(3, CHECK); // name_index, tag/access_flags
u2 name_index = cfs->get_u2_fast();
- cp->klass_index_at_put(index, name_index);
+ _cp->klass_index_at_put(index, name_index);
}
break;
case JVM_CONSTANT_Fieldref :
@@ -132,7 +131,7 @@
cfs->guarantee_more(5, CHECK); // class_index, name_and_type_index, tag/access_flags
u2 class_index = cfs->get_u2_fast();
u2 name_and_type_index = cfs->get_u2_fast();
- cp->field_at_put(index, class_index, name_and_type_index);
+ _cp->field_at_put(index, class_index, name_and_type_index);
}
break;
case JVM_CONSTANT_Methodref :
@@ -140,7 +139,7 @@
cfs->guarantee_more(5, CHECK); // class_index, name_and_type_index, tag/access_flags
u2 class_index = cfs->get_u2_fast();
u2 name_and_type_index = cfs->get_u2_fast();
- cp->method_at_put(index, class_index, name_and_type_index);
+ _cp->method_at_put(index, class_index, name_and_type_index);
}
break;
case JVM_CONSTANT_InterfaceMethodref :
@@ -148,14 +147,14 @@
cfs->guarantee_more(5, CHECK); // class_index, name_and_type_index, tag/access_flags
u2 class_index = cfs->get_u2_fast();
u2 name_and_type_index = cfs->get_u2_fast();
- cp->interface_method_at_put(index, class_index, name_and_type_index);
+ _cp->interface_method_at_put(index, class_index, name_and_type_index);
}
break;
case JVM_CONSTANT_String :
{
cfs->guarantee_more(3, CHECK); // string_index, tag/access_flags
u2 string_index = cfs->get_u2_fast();
- cp->string_index_at_put(index, string_index);
+ _cp->string_index_at_put(index, string_index);
}
break;
case JVM_CONSTANT_MethodHandle :
@@ -174,11 +173,11 @@
cfs->guarantee_more(4, CHECK); // ref_kind, method_index, tag/access_flags
u1 ref_kind = cfs->get_u1_fast();
u2 method_index = cfs->get_u2_fast();
- cp->method_handle_index_at_put(index, ref_kind, method_index);
+ _cp->method_handle_index_at_put(index, ref_kind, method_index);
} else if (tag == JVM_CONSTANT_MethodType) {
cfs->guarantee_more(3, CHECK); // signature_index, tag/access_flags
u2 signature_index = cfs->get_u2_fast();
- cp->method_type_index_at_put(index, signature_index);
+ _cp->method_type_index_at_put(index, signature_index);
} else {
ShouldNotReachHere();
}
@@ -200,21 +199,21 @@
u2 name_and_type_index = cfs->get_u2_fast();
if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index)
_max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later
- cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
+ _cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
}
break;
case JVM_CONSTANT_Integer :
{
cfs->guarantee_more(5, CHECK); // bytes, tag/access_flags
u4 bytes = cfs->get_u4_fast();
- cp->int_at_put(index, (jint) bytes);
+ _cp->int_at_put(index, (jint) bytes);
}
break;
case JVM_CONSTANT_Float :
{
cfs->guarantee_more(5, CHECK); // bytes, tag/access_flags
u4 bytes = cfs->get_u4_fast();
- cp->float_at_put(index, *(jfloat*)&bytes);
+ _cp->float_at_put(index, *(jfloat*)&bytes);
}
break;
case JVM_CONSTANT_Long :
@@ -225,7 +224,7 @@
{
cfs->guarantee_more(9, CHECK); // bytes, tag/access_flags
u8 bytes = cfs->get_u8_fast();
- cp->long_at_put(index, bytes);
+ _cp->long_at_put(index, bytes);
}
index++; // Skip entry following eight-byte constant, see JVM book p. 98
break;
@@ -237,7 +236,7 @@
{
cfs->guarantee_more(9, CHECK); // bytes, tag/access_flags
u8 bytes = cfs->get_u8_fast();
- cp->double_at_put(index, *(jdouble*)&bytes);
+ _cp->double_at_put(index, *(jdouble*)&bytes);
}
index++; // Skip entry following eight-byte constant, see JVM book p. 98
break;
@@ -246,7 +245,7 @@
cfs->guarantee_more(5, CHECK); // name_index, signature_index, tag/access_flags
u2 name_index = cfs->get_u2_fast();
u2 signature_index = cfs->get_u2_fast();
- cp->name_and_type_at_put(index, name_index, signature_index);
+ _cp->name_and_type_at_put(index, name_index, signature_index);
}
break;
case JVM_CONSTANT_Utf8 :
@@ -283,11 +282,11 @@
indices[names_count] = index;
hashValues[names_count++] = hash;
if (names_count == SymbolTable::symbol_alloc_batch_size) {
- SymbolTable::new_symbols(loader_data, cp, names_count, names, lengths, indices, hashValues, CHECK);
+ SymbolTable::new_symbols(_loader_data, _cp, names_count, names, lengths, indices, hashValues, CHECK);
names_count = 0;
}
} else {
- cp->symbol_at_put(index, result);
+ _cp->symbol_at_put(index, result);
}
}
break;
@@ -300,7 +299,7 @@
// Allocate the remaining symbols
if (names_count > 0) {
- SymbolTable::new_symbols(loader_data, cp, names_count, names, lengths, indices, hashValues, CHECK);
+ SymbolTable::new_symbols(_loader_data, _cp, names_count, names, lengths, indices, hashValues, CHECK);
}
// Copy _current pointer of local copy back to stream().
@@ -310,23 +309,6 @@
cfs0->set_current(cfs1.current());
}
-// This class unreferences constant pool symbols if an error has occurred
-// while parsing the class before it is assigned into the class.
-// If it gets an error after that it is unloaded and the constant pool will
-// be cleaned up then.
-class ConstantPoolCleaner : public StackObj {
- constantPoolHandle _cphandle;
- bool _in_error;
- public:
- ConstantPoolCleaner(constantPoolHandle cp) : _cphandle(cp), _in_error(true) {}
- ~ConstantPoolCleaner() {
- if (_in_error && _cphandle.not_null()) {
- _cphandle->unreference_symbols();
- }
- }
- void set_in_error(bool clean) { _in_error = clean; }
-};
-
bool inline valid_cp_range(int index, int length) { return (index > 0 && index < length); }
inline Symbol* check_symbol_at(constantPoolHandle cp, int index) {
@@ -336,7 +318,7 @@
return NULL;
}
-constantPoolHandle ClassFileParser::parse_constant_pool(ClassLoaderData* loader_data, TRAPS) {
+constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
ClassFileStream* cfs = stream();
constantPoolHandle nullHandle;
@@ -345,16 +327,13 @@
guarantee_property(
length >= 1, "Illegal constant pool size %u in class file %s",
length, CHECK_(nullHandle));
- ConstantPool* constant_pool =
- ConstantPool::allocate(loader_data,
- length,
- CHECK_(nullHandle));
+ ConstantPool* constant_pool = ConstantPool::allocate(_loader_data, length,
+ CHECK_(nullHandle));
+ _cp = constant_pool; // save in case of errors
constantPoolHandle cp (THREAD, constant_pool);
- ConstantPoolCleaner cp_in_error(cp); // set constant pool to be cleaned up.
-
// parsing constant pool entries
- parse_constant_pool_entries(loader_data, cp, length, CHECK_(nullHandle));
+ parse_constant_pool_entries(length, CHECK_(nullHandle));
int index = 1; // declared outside of loops for portability
@@ -373,8 +352,7 @@
if (!_need_verify) break;
int klass_ref_index = cp->klass_ref_index_at(index);
int name_and_type_ref_index = cp->name_and_type_ref_index_at(index);
- check_property(valid_cp_range(klass_ref_index, length) &&
- is_klass_reference(cp, klass_ref_index),
+ check_property(valid_klass_reference_at(klass_ref_index),
"Invalid constant pool index %u in class file %s",
klass_ref_index,
CHECK_(nullHandle));
@@ -404,16 +382,12 @@
if (!_need_verify) break;
int name_ref_index = cp->name_ref_index_at(index);
int signature_ref_index = cp->signature_ref_index_at(index);
- check_property(
- valid_cp_range(name_ref_index, length) &&
- cp->tag_at(name_ref_index).is_utf8(),
- "Invalid constant pool index %u in class file %s",
- name_ref_index, CHECK_(nullHandle));
- check_property(
- valid_cp_range(signature_ref_index, length) &&
- cp->tag_at(signature_ref_index).is_utf8(),
- "Invalid constant pool index %u in class file %s",
- signature_ref_index, CHECK_(nullHandle));
+ check_property(valid_symbol_at(name_ref_index),
+ "Invalid constant pool index %u in class file %s",
+ name_ref_index, CHECK_(nullHandle));
+ check_property(valid_symbol_at(signature_ref_index),
+ "Invalid constant pool index %u in class file %s",
+ signature_ref_index, CHECK_(nullHandle));
break;
}
case JVM_CONSTANT_Utf8 :
@@ -425,22 +399,18 @@
case JVM_CONSTANT_ClassIndex :
{
int class_index = cp->klass_index_at(index);
- check_property(
- valid_cp_range(class_index, length) &&
- cp->tag_at(class_index).is_utf8(),
- "Invalid constant pool index %u in class file %s",
- class_index, CHECK_(nullHandle));
+ check_property(valid_symbol_at(class_index),
+ "Invalid constant pool index %u in class file %s",
+ class_index, CHECK_(nullHandle));
cp->unresolved_klass_at_put(index, cp->symbol_at(class_index));
}
break;
case JVM_CONSTANT_StringIndex :
{
int string_index = cp->string_index_at(index);
- check_property(
- valid_cp_range(string_index, length) &&
- cp->tag_at(string_index).is_utf8(),
- "Invalid constant pool index %u in class file %s",
- string_index, CHECK_(nullHandle));
+ check_property(valid_symbol_at(string_index),
+ "Invalid constant pool index %u in class file %s",
+ string_index, CHECK_(nullHandle));
Symbol* sym = cp->symbol_at(string_index);
cp->unresolved_string_at_put(index, sym);
}
@@ -491,12 +461,9 @@
case JVM_CONSTANT_MethodType :
{
int ref_index = cp->method_type_index_at(index);
- check_property(
- valid_cp_range(ref_index, length) &&
- cp->tag_at(ref_index).is_utf8() &&
- EnableInvokeDynamic,
- "Invalid constant pool index %u in class file %s",
- ref_index, CHECK_(nullHandle));
+ check_property(valid_symbol_at(ref_index) && EnableInvokeDynamic,
+ "Invalid constant pool index %u in class file %s",
+ ref_index, CHECK_(nullHandle));
}
break;
case JVM_CONSTANT_InvokeDynamic :
@@ -541,7 +508,6 @@
}
if (!_need_verify) {
- cp_in_error.set_in_error(false);
return cp;
}
@@ -664,7 +630,6 @@
} // end of switch
} // end of for
- cp_in_error.set_in_error(false);
return cp;
}
@@ -786,93 +751,92 @@
}
-Array<Klass*>* ClassFileParser::parse_interfaces(constantPoolHandle cp,
- int length,
- ClassLoaderData* loader_data,
+Array<Klass*>* ClassFileParser::parse_interfaces(int length,
Handle protection_domain,
Symbol* class_name,
bool* has_default_methods,
TRAPS) {
- ClassFileStream* cfs = stream();
- assert(length > 0, "only called for length>0");
- // FIXME: Leak at later OOM.
- Array<Klass*>* interfaces = MetadataFactory::new_array<Klass*>(loader_data, length, NULL, CHECK_NULL);
-
- int index;
- for (index = 0; index < length; index++) {
- u2 interface_index = cfs->get_u2(CHECK_NULL);
- KlassHandle interf;
- check_property(
- valid_cp_range(interface_index, cp->length()) &&
- is_klass_reference(cp, interface_index),
- "Interface name has bad constant pool index %u in class file %s",
- interface_index, CHECK_NULL);
- if (cp->tag_at(interface_index).is_klass()) {
- interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
- } else {
- Symbol* unresolved_klass = cp->klass_name_at(interface_index);
-
- // Don't need to check legal name because it's checked when parsing constant pool.
- // But need to make sure it's not an array type.
- guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY,
- "Bad interface name in class file %s", CHECK_NULL);
- Handle class_loader(THREAD, loader_data->class_loader());
-
- // Call resolve_super so classcircularity is checked
- Klass* k = SystemDictionary::resolve_super_or_fail(class_name,
- unresolved_klass, class_loader, protection_domain,
- false, CHECK_NULL);
- interf = KlassHandle(THREAD, k);
+ if (length == 0) {
+ _local_interfaces = Universe::the_empty_klass_array();
+ } else {
+ ClassFileStream* cfs = stream();
+ assert(length > 0, "only called for length>0");
+ _local_interfaces = MetadataFactory::new_array<Klass*>(_loader_data, length, NULL, CHECK_NULL);
+
+ int index;
+ for (index = 0; index < length; index++) {
+ u2 interface_index = cfs->get_u2(CHECK_NULL);
+ KlassHandle interf;
+ check_property(
+ valid_klass_reference_at(interface_index),
+ "Interface name has bad constant pool index %u in class file %s",
+ interface_index, CHECK_NULL);
+ if (_cp->tag_at(interface_index).is_klass()) {
+ interf = KlassHandle(THREAD, _cp->resolved_klass_at(interface_index));
+ } else {
+ Symbol* unresolved_klass = _cp->klass_name_at(interface_index);
+
+ // Don't need to check legal name because it's checked when parsing constant pool.
+ // But need to make sure it's not an array type.
+ guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY,
+ "Bad interface name in class file %s", CHECK_NULL);
+ Handle class_loader(THREAD, _loader_data->class_loader());
+
+ // Call resolve_super so classcircularity is checked
+ Klass* k = SystemDictionary::resolve_super_or_fail(class_name,
+ unresolved_klass, class_loader, protection_domain,
+ false, CHECK_NULL);
+ interf = KlassHandle(THREAD, k);
+ }
+
+ if (!interf()->is_interface()) {
+ THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", NULL);
+ }
+ if (InstanceKlass::cast(interf())->has_default_methods()) {
+ *has_default_methods = true;
+ }
+ _local_interfaces->at_put(index, interf());
}
- if (!interf()->is_interface()) {
- THROW_MSG_(vmSymbols::java_lang_IncompatibleClassChangeError(), "Implementing class", NULL);
- }
- if (InstanceKlass::cast(interf())->has_default_methods()) {
- *has_default_methods = true;
+ if (!_need_verify || length <= 1) {
+ return _local_interfaces;
}
- interfaces->at_put(index, interf());
- }
-
- if (!_need_verify || length <= 1) {
- return interfaces;
- }
-
- // Check if there's any duplicates in interfaces
- ResourceMark rm(THREAD);
- NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(
- THREAD, NameSigHash*, HASH_ROW_SIZE);
- initialize_hashtable(interface_names);
- bool dup = false;
- {
- debug_only(No_Safepoint_Verifier nsv;)
- for (index = 0; index < length; index++) {
- Klass* k = interfaces->at(index);
- Symbol* name = InstanceKlass::cast(k)->name();
- // If no duplicates, add (name, NULL) in hashtable interface_names.
- if (!put_after_lookup(name, NULL, interface_names)) {
- dup = true;
- break;
+
+ // Check if there's any duplicates in interfaces
+ ResourceMark rm(THREAD);
+ NameSigHash** interface_names = NEW_RESOURCE_ARRAY_IN_THREAD(
+ THREAD, NameSigHash*, HASH_ROW_SIZE);
+ initialize_hashtable(interface_names);
+ bool dup = false;
+ {
+ debug_only(No_Safepoint_Verifier nsv;)
+ for (index = 0; index < length; index++) {
+ Klass* k = _local_interfaces->at(index);
+ Symbol* name = InstanceKlass::cast(k)->name();
+ // If no duplicates, add (name, NULL) in hashtable interface_names.
+ if (!put_after_lookup(name, NULL, interface_names)) {
+ dup = true;
+ break;
+ }
}
}
- }
- if (dup) {
- classfile_parse_error("Duplicate interface name in class file %s", CHECK_NULL);
+ if (dup) {
+ classfile_parse_error("Duplicate interface name in class file %s", CHECK_NULL);
+ }
}
-
- return interfaces;
+ return _local_interfaces;
}
-void ClassFileParser::verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS) {
+void ClassFileParser::verify_constantvalue(int constantvalue_index, int signature_index, TRAPS) {
// Make sure the constant pool entry is of a type appropriate to this field
guarantee_property(
(constantvalue_index > 0 &&
- constantvalue_index < cp->length()),
+ constantvalue_index < _cp->length()),
"Bad initial value index %u in ConstantValue attribute in class file %s",
constantvalue_index, CHECK);
- constantTag value_type = cp->tag_at(constantvalue_index);
- switch ( cp->basic_type_for_signature_at(signature_index) ) {
+ constantTag value_type = _cp->tag_at(constantvalue_index);
+ switch ( _cp->basic_type_for_signature_at(signature_index) ) {
case T_LONG:
guarantee_property(value_type.is_long(), "Inconsistent constant value type in class file %s", CHECK);
break;
@@ -886,7 +850,7 @@
guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
break;
case T_OBJECT:
- guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
+ guarantee_property((_cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
&& value_type.is_string()),
"Bad string initial value in class file %s", CHECK);
break;
@@ -899,15 +863,11 @@
// Parse attributes for a field.
-void ClassFileParser::parse_field_attributes(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- u2 attributes_count,
+void ClassFileParser::parse_field_attributes(u2 attributes_count,
bool is_static, u2 signature_index,
u2* constantvalue_index_addr,
bool* is_synthetic_addr,
u2* generic_signature_index_addr,
- AnnotationArray** field_annotations,
- AnnotationArray** field_type_annotations,
ClassFileParser::FieldAnnotationCollector* parsed_annotations,
TRAPS) {
ClassFileStream* cfs = stream();
@@ -927,12 +887,11 @@
cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length
u2 attribute_name_index = cfs->get_u2_fast();
u4 attribute_length = cfs->get_u4_fast();
- check_property(valid_cp_range(attribute_name_index, cp->length()) &&
- cp->tag_at(attribute_name_index).is_utf8(),
+ check_property(valid_symbol_at(attribute_name_index),
"Invalid field attribute index %u in class file %s",
attribute_name_index,
CHECK);
- Symbol* attribute_name = cp->symbol_at(attribute_name_index);
+ Symbol* attribute_name = _cp->symbol_at(attribute_name_index);
if (is_static && attribute_name == vmSymbols::tag_constant_value()) {
// ignore if non-static
if (constantvalue_index != 0) {
@@ -944,7 +903,7 @@
attribute_length, CHECK);
constantvalue_index = cfs->get_u2(CHECK);
if (_need_verify) {
- verify_constantvalue(constantvalue_index, signature_index, cp, CHECK);
+ verify_constantvalue(constantvalue_index, signature_index, CHECK);
}
} else if (attribute_name == vmSymbols::tag_synthetic()) {
if (attribute_length != 0) {
@@ -971,10 +930,8 @@
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
- parse_annotations(loader_data,
- runtime_visible_annotations,
+ parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
- cp,
parsed_annotations,
CHECK);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
@@ -1004,18 +961,18 @@
*constantvalue_index_addr = constantvalue_index;
*is_synthetic_addr = is_synthetic;
*generic_signature_index_addr = generic_signature_index;
- *field_annotations = assemble_annotations(loader_data,
- runtime_visible_annotations,
+ AnnotationArray* a = assemble_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
runtime_invisible_annotations,
runtime_invisible_annotations_length,
CHECK);
- *field_type_annotations = assemble_annotations(loader_data,
- runtime_visible_type_annotations,
- runtime_visible_type_annotations_length,
- runtime_invisible_type_annotations,
- runtime_invisible_type_annotations_length,
- CHECK);
+ parsed_annotations->set_field_annotations(a);
+ a = assemble_annotations(runtime_visible_type_annotations,
+ runtime_visible_type_annotations_length,
+ runtime_invisible_type_annotations,
+ runtime_invisible_type_annotations_length,
+ CHECK);
+ parsed_annotations->set_field_type_annotations(a);
return;
}
@@ -1106,13 +1063,9 @@
}
};
-Array<u2>* ClassFileParser::parse_fields(ClassLoaderData* loader_data,
- Symbol* class_name,
- constantPoolHandle cp,
+Array<u2>* ClassFileParser::parse_fields(Symbol* class_name,
bool is_interface,
FieldAllocationCount *fac,
- Array<AnnotationArray*>** fields_annotations,
- Array<AnnotationArray*>** fields_type_annotations,
u2* java_fields_count_ptr, TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_NULL); // length
@@ -1147,8 +1100,6 @@
u2* fa = NEW_RESOURCE_ARRAY_IN_THREAD(
THREAD, u2, total_fields * (FieldInfo::field_slots + 1));
- AnnotationArray* field_annotations = NULL;
- AnnotationArray* field_type_annotations = NULL;
// The generic signature slots start after all other fields' data.
int generic_signature_slot = total_fields * FieldInfo::field_slots;
int num_generic_signature = 0;
@@ -1161,53 +1112,52 @@
access_flags.set_flags(flags);
u2 name_index = cfs->get_u2_fast();
- int cp_size = cp->length();
- check_property(
- valid_cp_range(name_index, cp_size) && cp->tag_at(name_index).is_utf8(),
+ int cp_size = _cp->length();
+ check_property(valid_symbol_at(name_index),
"Invalid constant pool index %u for field name in class file %s",
- name_index, CHECK_NULL);
- Symbol* name = cp->symbol_at(name_index);
+ name_index,
+ CHECK_NULL);
+ Symbol* name = _cp->symbol_at(name_index);
verify_legal_field_name(name, CHECK_NULL);
u2 signature_index = cfs->get_u2_fast();
- check_property(
- valid_cp_range(signature_index, cp_size) &&
- cp->tag_at(signature_index).is_utf8(),
+ check_property(valid_symbol_at(signature_index),
"Invalid constant pool index %u for field signature in class file %s",
signature_index, CHECK_NULL);
- Symbol* sig = cp->symbol_at(signature_index);
+ Symbol* sig = _cp->symbol_at(signature_index);
verify_legal_field_signature(name, sig, CHECK_NULL);
u2 constantvalue_index = 0;
bool is_synthetic = false;
u2 generic_signature_index = 0;
bool is_static = access_flags.is_static();
- FieldAnnotationCollector parsed_annotations;
+ FieldAnnotationCollector parsed_annotations(_loader_data);
u2 attributes_count = cfs->get_u2_fast();
if (attributes_count > 0) {
- parse_field_attributes(loader_data,
- cp, attributes_count, is_static, signature_index,
+ parse_field_attributes(attributes_count, is_static, signature_index,
&constantvalue_index, &is_synthetic,
- &generic_signature_index, &field_annotations,
- &field_type_annotations, &parsed_annotations,
+ &generic_signature_index, &parsed_annotations,
CHECK_NULL);
- if (field_annotations != NULL) {
- if (*fields_annotations == NULL) {
- *fields_annotations = MetadataFactory::new_array<AnnotationArray*>(
- loader_data, length, NULL,
+ if (parsed_annotations.field_annotations() != NULL) {
+ if (_fields_annotations == NULL) {
+ _fields_annotations = MetadataFactory::new_array<AnnotationArray*>(
+ _loader_data, length, NULL,
CHECK_NULL);
}
- (*fields_annotations)->at_put(n, field_annotations);
+ _fields_annotations->at_put(n, parsed_annotations.field_annotations());
+ parsed_annotations.set_field_annotations(NULL);
}
- if (field_type_annotations != NULL) {
- if (*fields_type_annotations == NULL) {
- *fields_type_annotations = MetadataFactory::new_array<AnnotationArray*>(
- loader_data, length, NULL,
+ if (parsed_annotations.field_type_annotations() != NULL) {
+ if (_fields_type_annotations == NULL) {
+ _fields_type_annotations = MetadataFactory::new_array<AnnotationArray*>(
+ _loader_data, length, NULL,
CHECK_NULL);
}
- (*fields_type_annotations)->at_put(n, field_type_annotations);
+ _fields_type_annotations->at_put(n, parsed_annotations.field_type_annotations());
+ parsed_annotations.set_field_type_annotations(NULL);
}
+
if (is_synthetic) {
access_flags.set_is_synthetic();
}
@@ -1224,7 +1174,7 @@
name_index,
signature_index,
constantvalue_index);
- BasicType type = cp->basic_type_for_signature_at(signature_index);
+ BasicType type = _cp->basic_type_for_signature_at(signature_index);
// Remember how many oops we encountered and compute allocation type
FieldAllocationType atype = fac->update(is_static, type);
@@ -1245,8 +1195,8 @@
bool duplicate = false;
for (int i = 0; i < length; i++) {
FieldInfo* f = FieldInfo::from_field_array(fa, i);
- if (name == cp->symbol_at(f->name_index()) &&
- signature == cp->symbol_at(f->signature_index())) {
+ if (name == _cp->symbol_at(f->name_index()) &&
+ signature == _cp->symbol_at(f->signature_index())) {
        // Symbol is declared in Java so skip this one
duplicate = true;
break;
@@ -1280,8 +1230,9 @@
// fields array is trimed. Also unused slots that were reserved
// for generic signature indexes are discarded.
Array<u2>* fields = MetadataFactory::new_array<u2>(
- loader_data, index * FieldInfo::field_slots + num_generic_signature,
+ _loader_data, index * FieldInfo::field_slots + num_generic_signature,
CHECK_NULL);
+ _fields = fields; // save in case of error
{
int i = 0;
for (; i < index * FieldInfo::field_slots; i++) {
@@ -1303,7 +1254,7 @@
bool dup = false;
{
debug_only(No_Safepoint_Verifier nsv;)
- for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+ for (AllFieldStream fs(fields, _cp); !fs.done(); fs.next()) {
Symbol* name = fs.name();
Symbol* sig = fs.signature();
// If no duplicates, add name/signature in hashtable names_and_sigs.
@@ -1330,10 +1281,8 @@
}
-u2* ClassFileParser::parse_exception_table(ClassLoaderData* loader_data,
- u4 code_length,
+u2* ClassFileParser::parse_exception_table(u4 code_length,
u4 exception_table_length,
- constantPoolHandle cp,
TRAPS) {
ClassFileStream* cfs = stream();
@@ -1354,8 +1303,7 @@
"Illegal exception table handler in class file %s",
CHECK_NULL);
if (catch_type_index != 0) {
- guarantee_property(valid_cp_range(catch_type_index, cp->length()) &&
- is_klass_reference(cp, catch_type_index),
+ guarantee_property(valid_klass_reference_at(catch_type_index),
"Catch type in exception table has bad constant type in class file %s", CHECK_NULL);
}
}
@@ -1506,7 +1454,6 @@
u2* ClassFileParser::parse_localvariable_table(u4 code_length,
u2 max_locals,
u4 code_attribute_length,
- constantPoolHandle cp,
u2* localvariable_table_length,
bool isLVTT,
TRAPS) {
@@ -1544,20 +1491,16 @@
"Invalid length %u in %s in class file %s",
length, tbl_name, CHECK_NULL);
}
- int cp_size = cp->length();
- guarantee_property(
- valid_cp_range(name_index, cp_size) &&
- cp->tag_at(name_index).is_utf8(),
+ int cp_size = _cp->length();
+ guarantee_property(valid_symbol_at(name_index),
"Name index %u in %s has bad constant type in class file %s",
name_index, tbl_name, CHECK_NULL);
- guarantee_property(
- valid_cp_range(descriptor_index, cp_size) &&
- cp->tag_at(descriptor_index).is_utf8(),
+ guarantee_property(valid_symbol_at(descriptor_index),
"Signature index %u in %s has bad constant type in class file %s",
descriptor_index, tbl_name, CHECK_NULL);
- Symbol* name = cp->symbol_at(name_index);
- Symbol* sig = cp->symbol_at(descriptor_index);
+ Symbol* name = _cp->symbol_at(name_index);
+ Symbol* sig = _cp->symbol_at(descriptor_index);
verify_legal_field_name(name, CHECK_NULL);
u2 extra_slot = 0;
if (!isLVTT) {
@@ -1579,7 +1522,7 @@
void ClassFileParser::parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
- u1* u1_array, u2* u2_array, constantPoolHandle cp, TRAPS) {
+ u1* u1_array, u2* u2_array, TRAPS) {
ClassFileStream* cfs = stream();
u2 index = 0; // index in the array with long/double occupying two slots
u4 i1 = *u1_index;
@@ -1591,8 +1534,7 @@
index++;
} else if (tag == ITEM_Object) {
u2 class_index = u2_array[i2++] = cfs->get_u2(CHECK);
- guarantee_property(valid_cp_range(class_index, cp->length()) &&
- is_klass_reference(cp, class_index),
+ guarantee_property(valid_klass_reference_at(class_index),
"Bad class index %u in StackMap in class file %s",
class_index, CHECK);
} else if (tag == ITEM_Uninitialized) {
@@ -1613,8 +1555,7 @@
*u2_index = i2;
}
-Array<u1>* ClassFileParser::parse_stackmap_table(
- ClassLoaderData* loader_data,
+u1* ClassFileParser::parse_stackmap_table(
u4 code_attribute_length, TRAPS) {
if (code_attribute_length == 0)
return NULL;
@@ -1629,18 +1570,12 @@
if (!_need_verify && !DumpSharedSpaces) {
return NULL;
}
-
- Array<u1>* stackmap_data =
- MetadataFactory::new_array<u1>(loader_data, code_attribute_length, 0, CHECK_NULL);
-
- memcpy((void*)stackmap_data->adr_at(0),
- (void*)stackmap_table_start, code_attribute_length);
- return stackmap_data;
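+  // Return the raw attribute bytes; the caller copies them into method metadata via copy_stackmap_data.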
+ return stackmap_table_start;
}
u2* ClassFileParser::parse_checked_exceptions(u2* checked_exceptions_length,
u4 method_attribute_length,
- constantPoolHandle cp, TRAPS) {
+ TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_NULL); // checked_exceptions_length
*checked_exceptions_length = cfs->get_u2_fast();
@@ -1657,8 +1592,7 @@
for (int i = 0; i < len; i++) {
checked_exception = cfs->get_u2_fast();
check_property(
- valid_cp_range(checked_exception, cp->length()) &&
- is_klass_reference(cp, checked_exception),
+ valid_klass_reference_at(checked_exception),
"Exception name has bad type at constant pool %u in class file %s",
checked_exception, CHECK_NULL);
}
@@ -1735,9 +1669,7 @@
}
// Sift through annotations, looking for those significant to the VM:
-void ClassFileParser::parse_annotations(ClassLoaderData* loader_data,
- u1* buffer, int limit,
- constantPoolHandle cp,
+void ClassFileParser::parse_annotations(u1* buffer, int limit,
ClassFileParser::AnnotationCollector* coll,
TRAPS) {
// annotations := do(nann:u2) {annotation}
@@ -1767,17 +1699,17 @@
u1* abase = buffer + index0;
int atype = Bytes::get_Java_u2(abase + atype_off);
int count = Bytes::get_Java_u2(abase + count_off);
- Symbol* aname = check_symbol_at(cp, atype);
+ Symbol* aname = check_symbol_at(_cp, atype);
if (aname == NULL) break; // invalid annotation name
Symbol* member = NULL;
if (count >= 1) {
int member_index = Bytes::get_Java_u2(abase + member_off);
- member = check_symbol_at(cp, member_index);
+ member = check_symbol_at(_cp, member_index);
if (member == NULL) break; // invalid member name
}
// Here is where parsing particular annotations will take place.
- AnnotationCollector::ID id = coll->annotation_index(loader_data, aname);
+ AnnotationCollector::ID id = coll->annotation_index(_loader_data, aname);
if (id == AnnotationCollector::_unknown) continue;
coll->set_annotation(id);
@@ -1836,6 +1768,12 @@
f->set_contended_group(contended_group());
}
+ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
+  // If there's an error, deallocate metadata for field annotations.
+ MetadataFactory::free_array<u1>(_loader_data, _field_annotations);
+ MetadataFactory::free_array<u1>(_loader_data, _field_type_annotations);
+}
+
void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
if (has_annotation(_method_ForceInline))
m->set_force_inline(true);
@@ -1894,10 +1832,9 @@
&& _need_verify
&& _major_version >= JAVA_1_5_VERSION) {
clear_hashtable(lvt_Hash);
- ConstantPool* cp = cm->constants();
classfile_parse_error("Duplicated LocalVariableTable attribute "
"entry for '%s' in class file %s",
- cp->symbol_at(lvt->name_cp_index)->as_utf8(),
+ _cp->symbol_at(lvt->name_cp_index)->as_utf8(),
CHECK);
}
}
@@ -1916,18 +1853,16 @@
if (entry == NULL) {
if (_need_verify) {
clear_hashtable(lvt_Hash);
- ConstantPool* cp = cm->constants();
classfile_parse_error("LVTT entry for '%s' in class file %s "
"does not match any LVT entry",
- cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
+ _cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
CHECK);
}
} else if (entry->_elem->signature_cp_index != 0 && _need_verify) {
clear_hashtable(lvt_Hash);
- ConstantPool* cp = cm->constants();
classfile_parse_error("Duplicated LocalVariableTypeTable attribute "
"entry for '%s' in class file %s",
- cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
+ _cp->symbol_at(lvtt_elem.name_cp_index)->as_utf8(),
CHECK);
} else {
// to add generic signatures into LocalVariableTable
@@ -1939,8 +1874,7 @@
}
-void ClassFileParser::copy_method_annotations(ClassLoaderData* loader_data,
- ConstMethod* cm,
+void ClassFileParser::copy_method_annotations(ConstMethod* cm,
u1* runtime_visible_annotations,
int runtime_visible_annotations_length,
u1* runtime_invisible_annotations,
@@ -1961,8 +1895,7 @@
if (runtime_visible_annotations_length +
runtime_invisible_annotations_length > 0) {
- a = assemble_annotations(loader_data,
- runtime_visible_annotations,
+ a = assemble_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
runtime_invisible_annotations,
runtime_invisible_annotations_length,
@@ -1972,8 +1905,7 @@
if (runtime_visible_parameter_annotations_length +
runtime_invisible_parameter_annotations_length > 0) {
- a = assemble_annotations(loader_data,
- runtime_visible_parameter_annotations,
+ a = assemble_annotations(runtime_visible_parameter_annotations,
runtime_visible_parameter_annotations_length,
runtime_invisible_parameter_annotations,
runtime_invisible_parameter_annotations_length,
@@ -1982,8 +1914,7 @@
}
if (annotation_default_length > 0) {
- a = assemble_annotations(loader_data,
- annotation_default,
+ a = assemble_annotations(annotation_default,
annotation_default_length,
NULL,
0,
@@ -1993,8 +1924,7 @@
if (runtime_visible_type_annotations_length +
runtime_invisible_type_annotations_length > 0) {
- a = assemble_annotations(loader_data,
- runtime_visible_type_annotations,
+ a = assemble_annotations(runtime_visible_type_annotations,
runtime_visible_type_annotations_length,
runtime_invisible_type_annotations,
runtime_invisible_type_annotations_length,
@@ -2013,9 +1943,7 @@
// from the method back up to the containing klass. These flag values
// are added to klass's access_flags.
-methodHandle ClassFileParser::parse_method(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- bool is_interface,
+methodHandle ClassFileParser::parse_method(bool is_interface,
AccessFlags *promoted_flags,
TRAPS) {
ClassFileStream* cfs = stream();
@@ -2026,22 +1954,20 @@
int flags = cfs->get_u2_fast();
u2 name_index = cfs->get_u2_fast();
- int cp_size = cp->length();
+ int cp_size = _cp->length();
check_property(
- valid_cp_range(name_index, cp_size) &&
- cp->tag_at(name_index).is_utf8(),
+ valid_symbol_at(name_index),
"Illegal constant pool index %u for method name in class file %s",
name_index, CHECK_(nullHandle));
- Symbol* name = cp->symbol_at(name_index);
+ Symbol* name = _cp->symbol_at(name_index);
verify_legal_method_name(name, CHECK_(nullHandle));
u2 signature_index = cfs->get_u2_fast();
guarantee_property(
- valid_cp_range(signature_index, cp_size) &&
- cp->tag_at(signature_index).is_utf8(),
+ valid_symbol_at(signature_index),
"Illegal constant pool index %u for method signature in class file %s",
signature_index, CHECK_(nullHandle));
- Symbol* signature = cp->symbol_at(signature_index);
+ Symbol* signature = _cp->symbol_at(signature_index);
AccessFlags access_flags;
if (name == vmSymbols::class_initializer_name()) {
@@ -2097,7 +2023,8 @@
bool parsed_checked_exceptions_attribute = false;
bool parsed_stackmap_attribute = false;
// stackmap attribute - JDK1.5
- Array<u1>* stackmap_data = NULL;
+ u1* stackmap_data = NULL;
+ int stackmap_data_length = 0;
u2 generic_signature_index = 0;
MethodAnnotationCollector parsed_annotations;
u1* runtime_visible_annotations = NULL;
@@ -2122,12 +2049,11 @@
u2 method_attribute_name_index = cfs->get_u2_fast();
u4 method_attribute_length = cfs->get_u4_fast();
check_property(
- valid_cp_range(method_attribute_name_index, cp_size) &&
- cp->tag_at(method_attribute_name_index).is_utf8(),
+ valid_symbol_at(method_attribute_name_index),
"Invalid method attribute name index %u in class file %s",
method_attribute_name_index, CHECK_(nullHandle));
- Symbol* method_attribute_name = cp->symbol_at(method_attribute_name_index);
+ Symbol* method_attribute_name = _cp->symbol_at(method_attribute_name_index);
if (method_attribute_name == vmSymbols::tag_code()) {
// Parse Code attribute
if (_need_verify) {
@@ -2171,7 +2097,7 @@
exception_table_length = cfs->get_u2_fast();
if (exception_table_length > 0) {
exception_table_start =
- parse_exception_table(loader_data, code_length, exception_table_length, cp, CHECK_(nullHandle));
+ parse_exception_table(code_length, exception_table_length, CHECK_(nullHandle));
}
// Parse additional attributes in code attribute
@@ -2204,19 +2130,18 @@
calculated_attribute_length += code_attribute_length +
sizeof(code_attribute_name_index) +
sizeof(code_attribute_length);
- check_property(valid_cp_range(code_attribute_name_index, cp_size) &&
- cp->tag_at(code_attribute_name_index).is_utf8(),
+ check_property(valid_symbol_at(code_attribute_name_index),
"Invalid code attribute name index %u in class file %s",
code_attribute_name_index,
CHECK_(nullHandle));
if (LoadLineNumberTables &&
- cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
+ _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_line_number_table()) {
// Parse and compress line number table
parse_linenumber_table(code_attribute_length, code_length,
&linenumber_table, CHECK_(nullHandle));
} else if (LoadLocalVariableTables &&
- cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
+ _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_table()) {
// Parse local variable table
if (!lvt_allocated) {
localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
@@ -2238,7 +2163,6 @@
parse_localvariable_table(code_length,
max_locals,
code_attribute_length,
- cp,
&localvariable_table_length[lvt_cnt],
false, // is not LVTT
CHECK_(nullHandle));
@@ -2246,7 +2170,7 @@
lvt_cnt++;
} else if (LoadLocalVariableTypeTables &&
_major_version >= JAVA_1_5_VERSION &&
- cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
+ _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_local_variable_type_table()) {
if (!lvt_allocated) {
localvariable_table_length = NEW_RESOURCE_ARRAY_IN_THREAD(
THREAD, u2, INITIAL_MAX_LVT_NUMBER);
@@ -2268,19 +2192,19 @@
parse_localvariable_table(code_length,
max_locals,
code_attribute_length,
- cp,
&localvariable_type_table_length[lvtt_cnt],
true, // is LVTT
CHECK_(nullHandle));
lvtt_cnt++;
} else if (UseSplitVerifier &&
_major_version >= Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION &&
- cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
+ _cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_stack_map_table()) {
// Stack map is only needed by the new verifier in JDK1.5.
if (parsed_stackmap_attribute) {
classfile_parse_error("Multiple StackMapTable attributes in class file %s", CHECK_(nullHandle));
}
- stackmap_data = parse_stackmap_table(loader_data, code_attribute_length, CHECK_(nullHandle));
+ stackmap_data = parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+ stackmap_data_length = code_attribute_length;
parsed_stackmap_attribute = true;
} else {
// Skip unknown attributes
@@ -2301,7 +2225,7 @@
checked_exceptions_start =
parse_checked_exceptions(&checked_exceptions_length,
method_attribute_length,
- cp, CHECK_(nullHandle));
+ CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_method_parameters()) {
// reject multiple method parameters
if (method_parameters_seen) {
@@ -2359,9 +2283,8 @@
runtime_visible_annotations_length = method_attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
- parse_annotations(loader_data,
- runtime_visible_annotations,
- runtime_visible_annotations_length, cp, &parsed_annotations,
+ parse_annotations(runtime_visible_annotations,
+ runtime_visible_annotations_length, &parsed_annotations,
CHECK_(nullHandle));
cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
} else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
@@ -2434,18 +2357,18 @@
0);
Method* m = Method::allocate(
- loader_data, code_length, access_flags, &sizes,
+ _loader_data, code_length, access_flags, &sizes,
ConstMethod::NORMAL, CHECK_(nullHandle));
ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
// Fill in information from fixed part (access_flags already set)
- m->set_constants(cp());
+ m->set_constants(_cp);
m->set_name_index(name_index);
m->set_signature_index(signature_index);
#ifdef CC_INTERP
// hmm is there a gc issue here??
- ResultTypeFinder rtf(cp->symbol_at(signature_index));
+ ResultTypeFinder rtf(_cp->symbol_at(signature_index));
m->set_result_index(rtf.type());
#endif
@@ -2464,7 +2387,10 @@
// Fill in code attribute information
m->set_max_stack(max_stack);
m->set_max_locals(max_locals);
- m->constMethod()->set_stackmap_data(stackmap_data);
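+  // Copy the raw stackmap bytes into the ConstMethod's metadata.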
+ if (stackmap_data != NULL) {
+ m->constMethod()->copy_stackmap_data(_loader_data, stackmap_data,
+ stackmap_data_length, CHECK_NULL);
+ }
// Copy byte codes
m->set_code(code_start);
@@ -2520,7 +2446,7 @@
parsed_annotations.apply_to(m);
// Copy annotations
- copy_method_annotations(loader_data, m->constMethod(),
+ copy_method_annotations(m->constMethod(),
runtime_visible_annotations,
runtime_visible_annotations_length,
runtime_invisible_annotations,
@@ -2560,9 +2486,7 @@
// from the methods back up to the containing klass. These flag values
// are added to klass's access_flags.
-Array<Method*>* ClassFileParser::parse_methods(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- bool is_interface,
+Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
bool* has_default_methods,
@@ -2571,15 +2495,13 @@
cfs->guarantee_more(2, CHECK_NULL); // length
u2 length = cfs->get_u2_fast();
if (length == 0) {
- return Universe::the_empty_method_array();
+ _methods = Universe::the_empty_method_array();
} else {
- // FIXME: Handle leaks at later failures.
- Array<Method*>* methods = MetadataFactory::new_array<Method*>(loader_data, length, NULL, CHECK_NULL);
+ _methods = MetadataFactory::new_array<Method*>(_loader_data, length, NULL, CHECK_NULL);
HandleMark hm(THREAD);
for (int index = 0; index < length; index++) {
- methodHandle method = parse_method(loader_data,
- cp, is_interface,
+ methodHandle method = parse_method(is_interface,
promoted_flags,
CHECK_NULL);
@@ -2590,7 +2512,7 @@
// default method
*has_default_methods = true;
}
- methods->at_put(index, method());
+ _methods->at_put(index, method());
}
if (_need_verify && length > 1) {
@@ -2603,7 +2525,7 @@
{
debug_only(No_Safepoint_Verifier nsv;)
for (int i = 0; i < length; i++) {
- Method* m = methods->at(i);
+ Method* m = _methods->at(i);
// If no duplicates, add name/signature in hashtable names_and_sigs.
if (!put_after_lookup(m->name(), m->signature(), names_and_sigs)) {
dup = true;
@@ -2616,14 +2538,12 @@
CHECK_NULL);
}
}
- return methods;
}
+ return _methods;
}
-Array<int>* ClassFileParser::sort_methods(ClassLoaderData* loader_data,
- Array<Method*>* methods,
- TRAPS) {
+intArray* ClassFileParser::sort_methods(Array<Method*>* methods) {
int length = methods->length();
// If JVMTI original method ordering or sharing is enabled we have to
// remember the original class file ordering.
@@ -2641,10 +2561,11 @@
// Note that the ordering is not alphabetical, see Symbol::fast_compare
Method::sort_methods(methods);
+ intArray* method_ordering = NULL;
// If JVMTI original method ordering or sharing is enabled construct int
// array remembering the original ordering
if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
- Array<int>* method_ordering = MetadataFactory::new_array<int>(loader_data, length, CHECK_NULL);
+ method_ordering = new intArray(length);
for (int index = 0; index < length; index++) {
Method* m = methods->at(index);
int old_index = m->vtable_index();
@@ -2652,29 +2573,25 @@
method_ordering->at_put(index, old_index);
m->set_vtable_index(Method::invalid_vtable_index);
}
- return method_ordering;
- } else {
- return Universe::the_empty_int_array();
}
+ return method_ordering;
}
-void ClassFileParser::parse_classfile_sourcefile_attribute(constantPoolHandle cp, TRAPS) {
+void ClassFileParser::parse_classfile_sourcefile_attribute(TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK); // sourcefile_index
u2 sourcefile_index = cfs->get_u2_fast();
check_property(
- valid_cp_range(sourcefile_index, cp->length()) &&
- cp->tag_at(sourcefile_index).is_utf8(),
+ valid_symbol_at(sourcefile_index),
"Invalid SourceFile attribute at constant pool index %u in class file %s",
sourcefile_index, CHECK);
- set_class_sourcefile(cp->symbol_at(sourcefile_index));
+ set_class_sourcefile(_cp->symbol_at(sourcefile_index));
}
-void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
- int length, TRAPS) {
+void ClassFileParser::parse_classfile_source_debug_extension_attribute(int length, TRAPS) {
ClassFileStream* cfs = stream();
u1* sde_buffer = cfs->get_u1_buffer();
assert(sde_buffer != NULL, "null sde buffer");
@@ -2698,12 +2615,10 @@
#define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
// Return number of classes in the inner classes attribute table
-u2 ClassFileParser::parse_classfile_inner_classes_attribute(ClassLoaderData* loader_data,
- u1* inner_classes_attribute_start,
+u2 ClassFileParser::parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
bool parsed_enclosingmethod_attribute,
u2 enclosing_method_class_index,
u2 enclosing_method_method_index,
- constantPoolHandle cp,
TRAPS) {
ClassFileStream* cfs = stream();
u1* current_mark = cfs->current();
@@ -2724,33 +2639,31 @@
// enclosing_method_class_index,
// enclosing_method_method_index]
int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
- // FIXME: Will leak on exceptions.
- Array<u2>* inner_classes = MetadataFactory::new_array<u2>(loader_data, size, CHECK_0);
+ Array<u2>* inner_classes = MetadataFactory::new_array<u2>(_loader_data, size, CHECK_0);
+ _inner_classes = inner_classes;
+
int index = 0;
- int cp_size = cp->length();
+ int cp_size = _cp->length();
cfs->guarantee_more(8 * length, CHECK_0); // 4-tuples of u2
for (int n = 0; n < length; n++) {
// Inner class index
u2 inner_class_info_index = cfs->get_u2_fast();
check_property(
inner_class_info_index == 0 ||
- (valid_cp_range(inner_class_info_index, cp_size) &&
- is_klass_reference(cp, inner_class_info_index)),
+ valid_klass_reference_at(inner_class_info_index),
"inner_class_info_index %u has bad constant type in class file %s",
inner_class_info_index, CHECK_0);
// Outer class index
u2 outer_class_info_index = cfs->get_u2_fast();
check_property(
outer_class_info_index == 0 ||
- (valid_cp_range(outer_class_info_index, cp_size) &&
- is_klass_reference(cp, outer_class_info_index)),
+ valid_klass_reference_at(outer_class_info_index),
"outer_class_info_index %u has bad constant type in class file %s",
outer_class_info_index, CHECK_0);
// Inner class name
u2 inner_name_index = cfs->get_u2_fast();
check_property(
- inner_name_index == 0 || (valid_cp_range(inner_name_index, cp_size) &&
- cp->tag_at(inner_name_index).is_utf8()),
+ inner_name_index == 0 || valid_symbol_at(inner_name_index),
"inner_name_index %u has bad constant type in class file %s",
inner_name_index, CHECK_0);
if (_need_verify) {
@@ -2794,33 +2707,27 @@
}
assert(index == size, "wrong size");
- // Update InstanceKlass with inner class info.
- set_class_inner_classes(inner_classes);
-
// Restore buffer's current position.
cfs->set_current(current_mark);
return length;
}
-void ClassFileParser::parse_classfile_synthetic_attribute(constantPoolHandle cp, TRAPS) {
+void ClassFileParser::parse_classfile_synthetic_attribute(TRAPS) {
set_class_synthetic_flag(true);
}
-void ClassFileParser::parse_classfile_signature_attribute(constantPoolHandle cp, TRAPS) {
+void ClassFileParser::parse_classfile_signature_attribute(TRAPS) {
ClassFileStream* cfs = stream();
u2 signature_index = cfs->get_u2(CHECK);
check_property(
- valid_cp_range(signature_index, cp->length()) &&
- cp->tag_at(signature_index).is_utf8(),
+ valid_symbol_at(signature_index),
"Invalid constant pool index %u in Signature attribute in class file %s",
signature_index, CHECK);
- set_class_generic_signature(cp->symbol_at(signature_index));
+ set_class_generic_signature(_cp->symbol_at(signature_index));
}
-void ClassFileParser::parse_classfile_bootstrap_methods_attribute(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- u4 attribute_byte_length, TRAPS) {
+void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) {
ClassFileStream* cfs = stream();
u1* current_start = cfs->current();
@@ -2841,10 +2748,14 @@
// The array begins with a series of short[2] pairs, one for each tuple.
int index_size = (attribute_array_length * 2);
- Array<u2>* operands = MetadataFactory::new_array<u2>(loader_data, index_size + operand_count, CHECK);
+ Array<u2>* operands = MetadataFactory::new_array<u2>(_loader_data, index_size + operand_count, CHECK);
+
+ // Eagerly assign operands so they will be deallocated with the constant
+ // pool if there is an error.
+ _cp->set_operands(operands);
int operand_fill_index = index_size;
- int cp_size = cp->length();
+ int cp_size = _cp->length();
for (int n = 0; n < attribute_array_length; n++) {
// Store a 32-bit offset into the header of the operand array.
@@ -2856,7 +2767,7 @@
u2 argument_count = cfs->get_u2_fast();
check_property(
valid_cp_range(bootstrap_method_index, cp_size) &&
- cp->tag_at(bootstrap_method_index).is_method_handle(),
+ _cp->tag_at(bootstrap_method_index).is_method_handle(),
"bootstrap_method_index %u has bad constant type in class file %s",
bootstrap_method_index,
CHECK);
@@ -2868,7 +2779,7 @@
u2 argument_index = cfs->get_u2_fast();
check_property(
valid_cp_range(argument_index, cp_size) &&
- cp->tag_at(argument_index).is_loadable_constant(),
+ _cp->tag_at(argument_index).is_loadable_constant(),
"argument_index %u has bad constant type in class file %s",
argument_index,
CHECK);
@@ -2883,17 +2794,13 @@
guarantee_property(current_end == current_start + attribute_byte_length,
"Bad length on BootstrapMethods in class file %s",
CHECK);
-
- cp->set_operands(operands);
}
-void ClassFileParser::parse_classfile_attributes(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- ClassFileParser::ClassAnnotationCollector* parsed_annotations,
+void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotationCollector* parsed_annotations,
TRAPS) {
ClassFileStream* cfs = stream();
// Set inner classes attribute to default sentinel
- set_class_inner_classes(Universe::the_empty_short_array());
+ _inner_classes = Universe::the_empty_short_array();
cfs->guarantee_more(2, CHECK); // attributes_count
u2 attributes_count = cfs->get_u2_fast();
bool parsed_sourcefile_attribute = false;
@@ -2918,11 +2825,10 @@
u2 attribute_name_index = cfs->get_u2_fast();
u4 attribute_length = cfs->get_u4_fast();
check_property(
- valid_cp_range(attribute_name_index, cp->length()) &&
- cp->tag_at(attribute_name_index).is_utf8(),
+ valid_symbol_at(attribute_name_index),
"Attribute name has bad constant pool index %u in class file %s",
attribute_name_index, CHECK);
- Symbol* tag = cp->symbol_at(attribute_name_index);
+ Symbol* tag = _cp->symbol_at(attribute_name_index);
if (tag == vmSymbols::tag_source_file()) {
// Check for SourceFile tag
if (_need_verify) {
@@ -2933,10 +2839,10 @@
} else {
parsed_sourcefile_attribute = true;
}
- parse_classfile_sourcefile_attribute(cp, CHECK);
+ parse_classfile_sourcefile_attribute(CHECK);
} else if (tag == vmSymbols::tag_source_debug_extension()) {
// Check for SourceDebugExtension tag
- parse_classfile_source_debug_extension_attribute(cp, (int)attribute_length, CHECK);
+ parse_classfile_source_debug_extension_attribute((int)attribute_length, CHECK);
} else if (tag == vmSymbols::tag_inner_classes()) {
// Check for InnerClasses tag
if (parsed_innerclasses_attribute) {
@@ -2955,7 +2861,7 @@
"Invalid Synthetic classfile attribute length %u in class file %s",
attribute_length, CHECK);
}
- parse_classfile_synthetic_attribute(cp, CHECK);
+ parse_classfile_synthetic_attribute(CHECK);
} else if (tag == vmSymbols::tag_deprecated()) {
      // Check for Deprecated tag - 4276120
if (attribute_length != 0) {
@@ -2970,15 +2876,13 @@
"Wrong Signature attribute length %u in class file %s",
attribute_length, CHECK);
}
- parse_classfile_signature_attribute(cp, CHECK);
+ parse_classfile_signature_attribute(CHECK);
} else if (tag == vmSymbols::tag_runtime_visible_annotations()) {
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
- parse_annotations(loader_data,
- runtime_visible_annotations,
+ parse_annotations(runtime_visible_annotations,
runtime_visible_annotations_length,
- cp,
parsed_annotations,
CHECK);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
@@ -3000,13 +2904,11 @@
classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK);
}
// Validate the constant pool indices and types
- if (!cp->is_within_bounds(enclosing_method_class_index) ||
- !is_klass_reference(cp, enclosing_method_class_index)) {
- classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
- }
+ check_property(valid_klass_reference_at(enclosing_method_class_index),
+ "Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
if (enclosing_method_method_index != 0 &&
- (!cp->is_within_bounds(enclosing_method_method_index) ||
- !cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
+ (!_cp->is_within_bounds(enclosing_method_method_index) ||
+ !_cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
}
} else if (tag == vmSymbols::tag_bootstrap_methods() &&
@@ -3014,7 +2916,7 @@
if (parsed_bootstrap_methods_attribute)
classfile_parse_error("Multiple BootstrapMethods attributes in class file %s", CHECK);
parsed_bootstrap_methods_attribute = true;
- parse_classfile_bootstrap_methods_attribute(loader_data, cp, attribute_length, CHECK);
+ parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
} else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
runtime_visible_type_annotations_length = attribute_length;
runtime_visible_type_annotations = cfs->get_u1_buffer();
@@ -3035,29 +2937,24 @@
cfs->skip_u1(attribute_length, CHECK);
}
}
- AnnotationArray* annotations = assemble_annotations(loader_data,
- runtime_visible_annotations,
- runtime_visible_annotations_length,
- runtime_invisible_annotations,
- runtime_invisible_annotations_length,
- CHECK);
- set_class_annotations(annotations);
- AnnotationArray* type_annotations = assemble_annotations(loader_data,
- runtime_visible_type_annotations,
- runtime_visible_type_annotations_length,
- runtime_invisible_type_annotations,
- runtime_invisible_type_annotations_length,
- CHECK);
- set_class_type_annotations(type_annotations);
+ _annotations = assemble_annotations(runtime_visible_annotations,
+ runtime_visible_annotations_length,
+ runtime_invisible_annotations,
+ runtime_invisible_annotations_length,
+ CHECK);
+ _type_annotations = assemble_annotations(runtime_visible_type_annotations,
+ runtime_visible_type_annotations_length,
+ runtime_invisible_type_annotations,
+ runtime_invisible_type_annotations_length,
+ CHECK);
if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
u2 num_of_classes = parse_classfile_inner_classes_attribute(
- loader_data,
inner_classes_attribute_start,
parsed_innerclasses_attribute,
enclosing_method_class_index,
enclosing_method_method_index,
- cp, CHECK);
+ CHECK);
    if (parsed_innerclasses_attribute && _need_verify && _major_version >= JAVA_1_5_VERSION) {
guarantee_property(
inner_classes_attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
@@ -3085,18 +2982,43 @@
if (_sde_buffer != NULL) {
k->set_source_debug_extension(_sde_buffer, _sde_length);
}
- k->set_inner_classes(_inner_classes);
}
-AnnotationArray* ClassFileParser::assemble_annotations(ClassLoaderData* loader_data,
- u1* runtime_visible_annotations,
+// Transfer ownership of metadata allocated to the InstanceKlass.
+void ClassFileParser::apply_parsed_class_metadata(
+ instanceKlassHandle this_klass,
+ int java_fields_count, TRAPS) {
+ // Assign annotations if needed
+ if (_annotations != NULL || _type_annotations != NULL ||
+ _fields_annotations != NULL || _fields_type_annotations != NULL) {
+ Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
+ annotations->set_class_annotations(_annotations);
+ annotations->set_class_type_annotations(_type_annotations);
+ annotations->set_fields_annotations(_fields_annotations);
+ annotations->set_fields_type_annotations(_fields_type_annotations);
+ this_klass->set_annotations(annotations);
+ }
+
+ _cp->set_pool_holder(this_klass());
+ this_klass->set_constants(_cp);
+ this_klass->set_fields(_fields, java_fields_count);
+ this_klass->set_methods(_methods);
+ this_klass->set_inner_classes(_inner_classes);
+ this_klass->set_local_interfaces(_local_interfaces);
+ this_klass->set_transitive_interfaces(_transitive_interfaces);
+
+ // Clear out these fields so they don't get deallocated by the destructor
+ clear_class_metadata();
+}
+
+AnnotationArray* ClassFileParser::assemble_annotations(u1* runtime_visible_annotations,
int runtime_visible_annotations_length,
u1* runtime_invisible_annotations,
int runtime_invisible_annotations_length, TRAPS) {
AnnotationArray* annotations = NULL;
if (runtime_visible_annotations != NULL ||
runtime_invisible_annotations != NULL) {
- annotations = MetadataFactory::new_array<u1>(loader_data,
+ annotations = MetadataFactory::new_array<u1>(_loader_data,
runtime_visible_annotations_length +
runtime_invisible_annotations_length,
CHECK_(annotations));
@@ -3144,6 +3066,581 @@
#endif // ndef PRODUCT
+instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
+ TRAPS) {
+ instanceKlassHandle super_klass;
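+  // A superclass index of 0 is only legal for java.lang.Object, which has no superclass.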
+ if (super_class_index == 0) {
+ check_property(_class_name == vmSymbols::java_lang_Object(),
+ "Invalid superclass index %u in class file %s",
+ super_class_index,
+ CHECK_NULL);
+ } else {
+ check_property(valid_klass_reference_at(super_class_index),
+ "Invalid superclass index %u in class file %s",
+ super_class_index,
+ CHECK_NULL);
+ // The class name should be legal because it is checked when parsing constant pool.
+ // However, make sure it is not an array type.
+ bool is_array = false;
+ if (_cp->tag_at(super_class_index).is_klass()) {
+ super_klass = instanceKlassHandle(THREAD, _cp->resolved_klass_at(super_class_index));
+ if (_need_verify)
+ is_array = super_klass->oop_is_array();
+ } else if (_need_verify) {
+ is_array = (_cp->unresolved_klass_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY);
+ }
+ if (_need_verify) {
+ guarantee_property(!is_array,
+ "Bad superclass name in class file %s", CHECK_NULL);
+ }
+ }
+ return super_klass;
+}
+
+
+// Values needed for oopmap and InstanceKlass creation
+class FieldLayoutInfo : public StackObj {
+ public:
+ int* nonstatic_oop_offsets;
+ unsigned int* nonstatic_oop_counts;
+ unsigned int nonstatic_oop_map_count;
+ unsigned int total_oop_map_count;
+ int instance_size;
+ int nonstatic_field_size;
+ int static_field_size;
+ bool has_nonstatic_fields;
+};
+
+// Layout fields and fill in FieldLayoutInfo. Could use more refactoring!
+void ClassFileParser::layout_fields(Handle class_loader,
+ FieldAllocationCount* fac,
+ ClassAnnotationCollector* parsed_annotations,
+ FieldLayoutInfo* info,
+ TRAPS) {
+
+ // get the padding width from the option
+ // TODO: Ask VM about specific CPU we are running on
+ int pad_size = ContendedPaddingWidth;
+
+ // Field size and offset computation
+ int nonstatic_field_size = _super_klass() == NULL ? 0 : _super_klass()->nonstatic_field_size();
+#ifndef PRODUCT
+ int orig_nonstatic_field_size = 0;
+#endif
+ int next_static_oop_offset;
+ int next_static_double_offset;
+ int next_static_word_offset;
+ int next_static_short_offset;
+ int next_static_byte_offset;
+ int next_nonstatic_oop_offset;
+ int next_nonstatic_double_offset;
+ int next_nonstatic_word_offset;
+ int next_nonstatic_short_offset;
+ int next_nonstatic_byte_offset;
+ int next_nonstatic_type_offset;
+ int first_nonstatic_oop_offset;
+ int first_nonstatic_field_offset;
+ int next_nonstatic_field_offset;
+ int next_nonstatic_padded_offset;
+
+ // Count the contended fields by type.
+ int nonstatic_contended_count = 0;
+ FieldAllocationCount fac_contended;
+ for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+ if (fs.is_contended()) {
+ fac_contended.count[atype]++;
+ if (!fs.access_flags().is_static()) {
+ nonstatic_contended_count++;
+ }
+ }
+ }
+ int contended_count = nonstatic_contended_count;
+
+
+ // Calculate the starting byte offsets
+ next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
+ next_static_double_offset = next_static_oop_offset +
+ ((fac->count[STATIC_OOP]) * heapOopSize);
+ if ( fac->count[STATIC_DOUBLE] &&
+ (Universe::field_type_should_be_aligned(T_DOUBLE) ||
+ Universe::field_type_should_be_aligned(T_LONG)) ) {
+ next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
+ }
+
+ next_static_word_offset = next_static_double_offset +
+ ((fac->count[STATIC_DOUBLE]) * BytesPerLong);
+ next_static_short_offset = next_static_word_offset +
+ ((fac->count[STATIC_WORD]) * BytesPerInt);
+ next_static_byte_offset = next_static_short_offset +
+ ((fac->count[STATIC_SHORT]) * BytesPerShort);
+
+ first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
+ nonstatic_field_size * heapOopSize;
+
+ // class is contended, pad before all the fields
+ if (parsed_annotations->is_contended()) {
+ first_nonstatic_field_offset += pad_size;
+ }
+
+ next_nonstatic_field_offset = first_nonstatic_field_offset;
+
+ unsigned int nonstatic_double_count = fac->count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
+ unsigned int nonstatic_word_count = fac->count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
+ unsigned int nonstatic_short_count = fac->count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
+ unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
+ unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
+
+ bool super_has_nonstatic_fields =
+ (_super_klass() != NULL && _super_klass->has_nonstatic_fields());
+ bool has_nonstatic_fields = super_has_nonstatic_fields ||
+ ((nonstatic_double_count + nonstatic_word_count +
+ nonstatic_short_count + nonstatic_byte_count +
+ nonstatic_oop_count) != 0);
+
+
+ // Prepare list of oops for oop map generation.
+ int* nonstatic_oop_offsets;
+ unsigned int* nonstatic_oop_counts;
+ unsigned int nonstatic_oop_map_count = 0;
+
+ nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
+ THREAD, int, nonstatic_oop_count + 1);
+ nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD(
+ THREAD, unsigned int, nonstatic_oop_count + 1);
+
+ first_nonstatic_oop_offset = 0; // will be set for first oop field
+
+#ifndef PRODUCT
+ if( PrintCompactFieldsSavings ) {
+ next_nonstatic_double_offset = next_nonstatic_field_offset +
+ (nonstatic_oop_count * heapOopSize);
+ if ( nonstatic_double_count > 0 ) {
+ next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
+ }
+ next_nonstatic_word_offset = next_nonstatic_double_offset +
+ (nonstatic_double_count * BytesPerLong);
+ next_nonstatic_short_offset = next_nonstatic_word_offset +
+ (nonstatic_word_count * BytesPerInt);
+ next_nonstatic_byte_offset = next_nonstatic_short_offset +
+ (nonstatic_short_count * BytesPerShort);
+ next_nonstatic_type_offset = align_size_up((next_nonstatic_byte_offset +
+ nonstatic_byte_count ), heapOopSize );
+ orig_nonstatic_field_size = nonstatic_field_size +
+ ((next_nonstatic_type_offset - first_nonstatic_field_offset)/heapOopSize);
+ }
+#endif
+ bool compact_fields = CompactFields;
+ int allocation_style = FieldsAllocationStyle;
+ if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
+ assert(false, "0 <= FieldsAllocationStyle <= 2");
+ allocation_style = 1; // Optimistic
+ }
+
+ // The next classes have predefined hard-coded fields offsets
+  // (see JavaClasses::compute_hard_coded_offsets()).
+ // Use default fields allocation order for them.
+ if( (allocation_style != 0 || compact_fields ) && class_loader.is_null() &&
+ (_class_name == vmSymbols::java_lang_AssertionStatusDirectives() ||
+ _class_name == vmSymbols::java_lang_Class() ||
+ _class_name == vmSymbols::java_lang_ClassLoader() ||
+ _class_name == vmSymbols::java_lang_ref_Reference() ||
+ _class_name == vmSymbols::java_lang_ref_SoftReference() ||
+ _class_name == vmSymbols::java_lang_StackTraceElement() ||
+ _class_name == vmSymbols::java_lang_String() ||
+ _class_name == vmSymbols::java_lang_Throwable() ||
+ _class_name == vmSymbols::java_lang_Boolean() ||
+ _class_name == vmSymbols::java_lang_Character() ||
+ _class_name == vmSymbols::java_lang_Float() ||
+ _class_name == vmSymbols::java_lang_Double() ||
+ _class_name == vmSymbols::java_lang_Byte() ||
+ _class_name == vmSymbols::java_lang_Short() ||
+ _class_name == vmSymbols::java_lang_Integer() ||
+ _class_name == vmSymbols::java_lang_Long())) {
+ allocation_style = 0; // Allocate oops first
+ compact_fields = false; // Don't compact fields
+ }
+
+ if( allocation_style == 0 ) {
+ // Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
+ next_nonstatic_oop_offset = next_nonstatic_field_offset;
+ next_nonstatic_double_offset = next_nonstatic_oop_offset +
+ (nonstatic_oop_count * heapOopSize);
+ } else if( allocation_style == 1 ) {
+ // Fields order: longs/doubles, ints, shorts/chars, bytes, oops, padded fields
+ next_nonstatic_double_offset = next_nonstatic_field_offset;
+ } else if( allocation_style == 2 ) {
+ // Fields allocation: oops fields in super and sub classes are together.
+ if( nonstatic_field_size > 0 && _super_klass() != NULL &&
+ _super_klass->nonstatic_oop_map_size() > 0 ) {
+ unsigned int map_count = _super_klass->nonstatic_oop_map_count();
+ OopMapBlock* first_map = _super_klass->start_of_nonstatic_oop_maps();
+ OopMapBlock* last_map = first_map + map_count - 1;
+ int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
+ if (next_offset == next_nonstatic_field_offset) {
+ allocation_style = 0; // allocate oops first
+ next_nonstatic_oop_offset = next_nonstatic_field_offset;
+ next_nonstatic_double_offset = next_nonstatic_oop_offset +
+ (nonstatic_oop_count * heapOopSize);
+ }
+ }
+ if( allocation_style == 2 ) {
+ allocation_style = 1; // allocate oops last
+ next_nonstatic_double_offset = next_nonstatic_field_offset;
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+
+ int nonstatic_oop_space_count = 0;
+ int nonstatic_word_space_count = 0;
+ int nonstatic_short_space_count = 0;
+ int nonstatic_byte_space_count = 0;
+ int nonstatic_oop_space_offset;
+ int nonstatic_word_space_offset;
+ int nonstatic_short_space_offset;
+ int nonstatic_byte_space_offset;
+
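+  // Align the long/double section and, when compacting fields, backfill the alignment gap with smaller fields.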
+ if( nonstatic_double_count > 0 ) {
+ int offset = next_nonstatic_double_offset;
+ next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
+ if( compact_fields && offset != next_nonstatic_double_offset ) {
+ // Allocate available fields into the gap before double field.
+ int length = next_nonstatic_double_offset - offset;
+ assert(length == BytesPerInt, "");
+ nonstatic_word_space_offset = offset;
+ if( nonstatic_word_count > 0 ) {
+ nonstatic_word_count -= 1;
+ nonstatic_word_space_count = 1; // Only one will fit
+ length -= BytesPerInt;
+ offset += BytesPerInt;
+ }
+ nonstatic_short_space_offset = offset;
+ while( length >= BytesPerShort && nonstatic_short_count > 0 ) {
+ nonstatic_short_count -= 1;
+ nonstatic_short_space_count += 1;
+ length -= BytesPerShort;
+ offset += BytesPerShort;
+ }
+ nonstatic_byte_space_offset = offset;
+ while( length > 0 && nonstatic_byte_count > 0 ) {
+ nonstatic_byte_count -= 1;
+ nonstatic_byte_space_count += 1;
+ length -= 1;
+ }
+ // Allocate oop field in the gap if there are no other fields for that.
+ nonstatic_oop_space_offset = offset;
+ if( length >= heapOopSize && nonstatic_oop_count > 0 &&
+ allocation_style != 0 ) { // when oop fields not first
+ nonstatic_oop_count -= 1;
+ nonstatic_oop_space_count = 1; // Only one will fit
+ length -= heapOopSize;
+ offset += heapOopSize;
+ }
+ }
+ }
+
+ next_nonstatic_word_offset = next_nonstatic_double_offset +
+ (nonstatic_double_count * BytesPerLong);
+ next_nonstatic_short_offset = next_nonstatic_word_offset +
+ (nonstatic_word_count * BytesPerInt);
+ next_nonstatic_byte_offset = next_nonstatic_short_offset +
+ (nonstatic_short_count * BytesPerShort);
+ next_nonstatic_padded_offset = next_nonstatic_byte_offset +
+ nonstatic_byte_count;
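+  // Contended fields, if any, are laid out later starting at next_nonstatic_padded_offset.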
+
+ // let oops jump before padding with this allocation style
+ if( allocation_style == 1 ) {
+ next_nonstatic_oop_offset = next_nonstatic_padded_offset;
+ if( nonstatic_oop_count > 0 ) {
+ next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
+ }
+ next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
+ }
+
+ // Iterate over fields again and compute correct offsets.
+ // The field allocation type was temporarily stored in the offset slot.
+ // oop fields are located before non-oop fields (static and non-static).
+ for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ // contended instance fields are handled below
+ if (fs.is_contended() && !fs.access_flags().is_static()) continue;
+
+ int real_offset;
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+
+ // pack the rest of the fields
+ switch (atype) {
+ case STATIC_OOP:
+ real_offset = next_static_oop_offset;
+ next_static_oop_offset += heapOopSize;
+ break;
+ case STATIC_BYTE:
+ real_offset = next_static_byte_offset;
+ next_static_byte_offset += 1;
+ break;
+ case STATIC_SHORT:
+ real_offset = next_static_short_offset;
+ next_static_short_offset += BytesPerShort;
+ break;
+ case STATIC_WORD:
+ real_offset = next_static_word_offset;
+ next_static_word_offset += BytesPerInt;
+ break;
+ case STATIC_DOUBLE:
+ real_offset = next_static_double_offset;
+ next_static_double_offset += BytesPerLong;
+ break;
+ case NONSTATIC_OOP:
+ if( nonstatic_oop_space_count > 0 ) {
+ real_offset = nonstatic_oop_space_offset;
+ nonstatic_oop_space_offset += heapOopSize;
+ nonstatic_oop_space_count -= 1;
+ } else {
+ real_offset = next_nonstatic_oop_offset;
+ next_nonstatic_oop_offset += heapOopSize;
+ }
+ // Update oop maps
+ if( nonstatic_oop_map_count > 0 &&
+ nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
+ real_offset -
+ int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
+ heapOopSize ) {
+ // Extend current oop map
+ nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
+ } else {
+ // Create new oop map
+ nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+ nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
+ nonstatic_oop_map_count += 1;
+ if( first_nonstatic_oop_offset == 0 ) { // Undefined
+ first_nonstatic_oop_offset = real_offset;
+ }
+ }
+ break;
+ case NONSTATIC_BYTE:
+ if( nonstatic_byte_space_count > 0 ) {
+ real_offset = nonstatic_byte_space_offset;
+ nonstatic_byte_space_offset += 1;
+ nonstatic_byte_space_count -= 1;
+ } else {
+ real_offset = next_nonstatic_byte_offset;
+ next_nonstatic_byte_offset += 1;
+ }
+ break;
+ case NONSTATIC_SHORT:
+ if( nonstatic_short_space_count > 0 ) {
+ real_offset = nonstatic_short_space_offset;
+ nonstatic_short_space_offset += BytesPerShort;
+ nonstatic_short_space_count -= 1;
+ } else {
+ real_offset = next_nonstatic_short_offset;
+ next_nonstatic_short_offset += BytesPerShort;
+ }
+ break;
+ case NONSTATIC_WORD:
+ if( nonstatic_word_space_count > 0 ) {
+ real_offset = nonstatic_word_space_offset;
+ nonstatic_word_space_offset += BytesPerInt;
+ nonstatic_word_space_count -= 1;
+ } else {
+ real_offset = next_nonstatic_word_offset;
+ next_nonstatic_word_offset += BytesPerInt;
+ }
+ break;
+ case NONSTATIC_DOUBLE:
+ real_offset = next_nonstatic_double_offset;
+ next_nonstatic_double_offset += BytesPerLong;
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ fs.set_offset(real_offset);
+ }
+
+
+ // Handle the contended cases.
+ //
+ // Each contended field should not intersect the cache line with another contended field.
+ // In the absence of alignment information, we end up with pessimistically separating
+  // In the absence of alignment information, we end up pessimistically separating
+ //
+ // Additionally, this should not break alignment for the fields, so we round the alignment up
+ // for each field.
+ if (contended_count > 0) {
+
+ // if there is at least one contended field, we need to have pre-padding for them
+ if (nonstatic_contended_count > 0) {
+ next_nonstatic_padded_offset += pad_size;
+ }
+
+ // collect all contended groups
+ BitMap bm(_cp->size());
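+    // A bit is set for each distinct contended group tag seen among the not-yet-laid-out fields.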
+ for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ if (fs.is_contended()) {
+ bm.set_bit(fs.contended_group());
+ }
+ }
+
+ int current_group = -1;
+ while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
+
+ for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
+
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ // skip non-contended fields and fields from different group
+ if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
+
+ // handle statics below
+ if (fs.access_flags().is_static()) continue;
+
+ int real_offset;
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+
+ switch (atype) {
+ case NONSTATIC_BYTE:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += 1;
+ break;
+
+ case NONSTATIC_SHORT:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += BytesPerShort;
+ break;
+
+ case NONSTATIC_WORD:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += BytesPerInt;
+ break;
+
+ case NONSTATIC_DOUBLE:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += BytesPerLong;
+ break;
+
+ case NONSTATIC_OOP:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += heapOopSize;
+
+ // Create new oop map
+ nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+ nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
+ nonstatic_oop_map_count += 1;
+ if( first_nonstatic_oop_offset == 0 ) { // Undefined
+ first_nonstatic_oop_offset = real_offset;
+ }
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ if (fs.contended_group() == 0) {
+          // The contended group defines an equivalence class over the fields:
+          // fields within the same contended group are not inter-padded.
+          // The only exception is the default group (group 0), which carries no
+          // such equivalence and so requires intra-padding after each field.
+ next_nonstatic_padded_offset += pad_size;
+ }
+
+ fs.set_offset(real_offset);
+ } // for
+
+ // Start laying out the next group.
+ // Note that this will effectively pad the last group in the back;
+ // this is expected to alleviate memory contention effects for
+      // subclass fields and/or adjacent objects.
+ // If this was the default group, the padding is already in place.
+ if (current_group != 0) {
+ next_nonstatic_padded_offset += pad_size;
+ }
+ }
+
+ // handle static fields
+ }
+
+ // Size of instances
+ int notaligned_offset = next_nonstatic_padded_offset;
+
+ // Entire class is contended, pad in the back.
+ // This helps to alleviate memory contention effects for subclass fields
+  // and/or adjacent objects.
+ if (parsed_annotations->is_contended()) {
+ notaligned_offset += pad_size;
+ }
+
+ int next_static_type_offset = align_size_up(next_static_byte_offset, wordSize);
+ int static_field_size = (next_static_type_offset -
+ InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
+
+ next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
+ nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
+ - first_nonstatic_field_offset)/heapOopSize);
+
+ next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
+ int instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
+
+ assert(instance_size == align_object_size(align_size_up(
+ (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize + ((parsed_annotations->is_contended()) ? pad_size : 0)),
+ wordSize) / wordSize), "consistent layout helper value");
+
+ // Number of non-static oop map blocks allocated at end of klass.
+ const unsigned int total_oop_map_count =
+ compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
+ first_nonstatic_oop_offset);
+
+#ifndef PRODUCT
+ if( PrintCompactFieldsSavings ) {
+ ResourceMark rm;
+ if( nonstatic_field_size < orig_nonstatic_field_size ) {
+ tty->print("[Saved %d of %d bytes in %s]\n",
+ (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize,
+ orig_nonstatic_field_size*heapOopSize,
+ _class_name);
+ } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
+ tty->print("[Wasted %d over %d bytes in %s]\n",
+ (nonstatic_field_size - orig_nonstatic_field_size)*heapOopSize,
+ orig_nonstatic_field_size*heapOopSize,
+ _class_name);
+ }
+ }
+
+ if (PrintFieldLayout) {
+ print_field_layout(_class_name,
+ _fields,
+ _cp,
+ instance_size,
+ first_nonstatic_field_offset,
+ next_nonstatic_field_offset,
+ next_static_type_offset);
+ }
+
+#endif
+ // Pass back information needed for InstanceKlass creation
+ info->nonstatic_oop_offsets = nonstatic_oop_offsets;
+ info->nonstatic_oop_counts = nonstatic_oop_counts;
+ info->nonstatic_oop_map_count = nonstatic_oop_map_count;
+ info->total_oop_map_count = total_oop_map_count;
+ info->instance_size = instance_size;
+ info->static_field_size = static_field_size;
+ info->nonstatic_field_size = nonstatic_field_size;
+ info->has_nonstatic_fields = has_nonstatic_fields;
+}
+
+
instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
@@ -3176,7 +3673,7 @@
jt->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::PARSE_CLASS);
- init_parsed_class_attributes();
+ init_parsed_class_attributes(loader_data);
if (JvmtiExport::should_post_class_file_load_hook()) {
// Get the cached class file bytes (if any) from the class that
@@ -3271,8 +3768,7 @@
_relax_verify = Verifier::relax_verify_for(class_loader());
// Constant pool
- constantPoolHandle cp = parse_constant_pool(loader_data, CHECK_(nullHandle));
- ConstantPoolCleaner error_handler(cp); // set constant pool to be cleaned up.
+ constantPoolHandle cp = parse_constant_pool(CHECK_(nullHandle));
int cp_size = cp->length();
@@ -3290,7 +3786,6 @@
access_flags.set_flags(flags);
// This class and superclass
- instanceKlassHandle super_klass;
u2 this_class_index = cfs->get_u2_fast();
check_property(
valid_cp_range(this_class_index, cp_size) &&
@@ -3345,59 +3840,27 @@
}
u2 super_class_index = cfs->get_u2_fast();
- if (super_class_index == 0) {
- check_property(class_name == vmSymbols::java_lang_Object(),
- "Invalid superclass index %u in class file %s",
- super_class_index,
- CHECK_(nullHandle));
- } else {
- check_property(valid_cp_range(super_class_index, cp_size) &&
- is_klass_reference(cp, super_class_index),
- "Invalid superclass index %u in class file %s",
- super_class_index,
- CHECK_(nullHandle));
- // The class name should be legal because it is checked when parsing constant pool.
- // However, make sure it is not an array type.
- bool is_array = false;
- if (cp->tag_at(super_class_index).is_klass()) {
- super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index));
- if (_need_verify)
- is_array = super_klass->oop_is_array();
- } else if (_need_verify) {
- is_array = (cp->unresolved_klass_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY);
- }
- if (_need_verify) {
- guarantee_property(!is_array,
- "Bad superclass name in class file %s", CHECK_(nullHandle));
- }
- }
+ instanceKlassHandle super_klass = parse_super_class(super_class_index,
+ CHECK_NULL);
// Interfaces
u2 itfs_len = cfs->get_u2_fast();
- Array<Klass*>* local_interfaces;
- if (itfs_len == 0) {
- local_interfaces = Universe::the_empty_klass_array();
- } else {
- local_interfaces = parse_interfaces(
- cp, itfs_len, loader_data, protection_domain, _class_name,
- &has_default_methods, CHECK_(nullHandle));
- }
+ Array<Klass*>* local_interfaces =
+ parse_interfaces(itfs_len, protection_domain, _class_name,
+ &has_default_methods, CHECK_(nullHandle));
u2 java_fields_count = 0;
// Fields (offsets are filled in later)
FieldAllocationCount fac;
- Array<AnnotationArray*>* fields_annotations = NULL;
- Array<AnnotationArray*>* fields_type_annotations = NULL;
- Array<u2>* fields = parse_fields(loader_data, class_name, cp, access_flags.is_interface(), &fac, &fields_annotations,
- &fields_type_annotations,
- &java_fields_count,
- CHECK_(nullHandle));
+ Array<u2>* fields = parse_fields(class_name,
+ access_flags.is_interface(),
+ &fac, &java_fields_count,
+ CHECK_(nullHandle));
// Methods
bool has_final_method = false;
AccessFlags promoted_flags;
promoted_flags.set_flags(0);
- Array<Method*>* methods = parse_methods(loader_data,
- cp, access_flags.is_interface(),
+ Array<Method*>* methods = parse_methods(access_flags.is_interface(),
&promoted_flags,
&has_final_method,
&has_default_methods,
@@ -3405,7 +3868,7 @@
// Additional attributes
ClassAnnotationCollector parsed_annotations;
- parse_classfile_attributes(loader_data, cp, &parsed_annotations, CHECK_(nullHandle));
+ parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
// Make sure this is the end of class file stream
guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
@@ -3452,13 +3915,15 @@
}
}
+ // save super klass for error handling.
+ _super_klass = super_klass;
+
// Compute the transitive list of all unique interfaces implemented by this class
- Array<Klass*>* transitive_interfaces = compute_transitive_interfaces(loader_data, super_klass, local_interfaces, CHECK_(nullHandle));
+ _transitive_interfaces =
+ compute_transitive_interfaces(super_klass, local_interfaces, CHECK_(nullHandle));
// sort methods
- Array<int>* method_ordering = sort_methods(loader_data,
- methods,
- CHECK_(nullHandle));
+ intArray* method_ordering = sort_methods(methods);
// promote flags from parse_methods() to the klass' flags
access_flags.add_promoted_flags(promoted_flags.as_int());
@@ -3476,587 +3941,14 @@
CHECK_(nullHandle));
// Size of Java itable (in words)
- itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);
-
- // get the padding width from the option
- // TODO: Ask VM about specific CPU we are running on
- int pad_size = ContendedPaddingWidth;
-
- // Field size and offset computation
- int nonstatic_field_size = super_klass() == NULL ? 0 : super_klass->nonstatic_field_size();
-#ifndef PRODUCT
- int orig_nonstatic_field_size = 0;
-#endif
- int next_static_oop_offset;
- int next_static_double_offset;
- int next_static_word_offset;
- int next_static_short_offset;
- int next_static_byte_offset;
- int next_static_padded_offset;
- int next_nonstatic_oop_offset;
- int next_nonstatic_double_offset;
- int next_nonstatic_word_offset;
- int next_nonstatic_short_offset;
- int next_nonstatic_byte_offset;
- int next_nonstatic_type_offset;
- int first_nonstatic_oop_offset;
- int first_nonstatic_field_offset;
- int next_nonstatic_field_offset;
- int next_nonstatic_padded_offset;
-
- // Count the contended fields by type.
- int static_contended_count = 0;
- int nonstatic_contended_count = 0;
- FieldAllocationCount fac_contended;
- for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
- FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
- if (fs.is_contended()) {
- fac_contended.count[atype]++;
- if (fs.access_flags().is_static()) {
- static_contended_count++;
- } else {
- nonstatic_contended_count++;
- }
- }
- }
- int contended_count = static_contended_count + nonstatic_contended_count;
-
-
- // Calculate the starting byte offsets
- next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
-
- // class is contended, pad before all the fields
- if (parsed_annotations.is_contended()) {
- next_static_oop_offset += pad_size;
- }
-
- next_static_double_offset = next_static_oop_offset +
- ((fac.count[STATIC_OOP] - fac_contended.count[STATIC_OOP]) * heapOopSize);
- if ( fac.count[STATIC_DOUBLE] &&
- (Universe::field_type_should_be_aligned(T_DOUBLE) ||
- Universe::field_type_should_be_aligned(T_LONG)) ) {
- next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
- }
-
- next_static_word_offset = next_static_double_offset +
- ((fac.count[STATIC_DOUBLE] - fac_contended.count[STATIC_DOUBLE]) * BytesPerLong);
- next_static_short_offset = next_static_word_offset +
- ((fac.count[STATIC_WORD] - fac_contended.count[STATIC_WORD]) * BytesPerInt);
- next_static_byte_offset = next_static_short_offset +
- ((fac.count[STATIC_SHORT] - fac_contended.count[STATIC_SHORT]) * BytesPerShort);
- next_static_padded_offset = next_static_byte_offset +
- ((fac.count[STATIC_BYTE] - fac_contended.count[STATIC_BYTE]) * 1);
-
- first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
- nonstatic_field_size * heapOopSize;
-
- // class is contended, pad before all the fields
- if (parsed_annotations.is_contended()) {
- first_nonstatic_field_offset += pad_size;
- }
-
- next_nonstatic_field_offset = first_nonstatic_field_offset;
-
- unsigned int nonstatic_double_count = fac.count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
- unsigned int nonstatic_word_count = fac.count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
- unsigned int nonstatic_short_count = fac.count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
- unsigned int nonstatic_byte_count = fac.count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
- unsigned int nonstatic_oop_count = fac.count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
-
- bool super_has_nonstatic_fields =
- (super_klass() != NULL && super_klass->has_nonstatic_fields());
- bool has_nonstatic_fields = super_has_nonstatic_fields ||
- ((nonstatic_double_count + nonstatic_word_count +
- nonstatic_short_count + nonstatic_byte_count +
- nonstatic_oop_count) != 0);
-
-
- // Prepare list of oops for oop map generation.
- int* nonstatic_oop_offsets;
- unsigned int* nonstatic_oop_counts;
- unsigned int nonstatic_oop_map_count = 0;
-
- nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
- THREAD, int, nonstatic_oop_count + 1);
- nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD(
- THREAD, unsigned int, nonstatic_oop_count + 1);
-
- first_nonstatic_oop_offset = 0; // will be set for first oop field
-
-#ifndef PRODUCT
- if( PrintCompactFieldsSavings ) {
- next_nonstatic_double_offset = next_nonstatic_field_offset +
- (nonstatic_oop_count * heapOopSize);
- if ( nonstatic_double_count > 0 ) {
- next_nonstatic_double_offset = align_size_up(next_nonstatic_double_offset, BytesPerLong);
- }
- next_nonstatic_word_offset = next_nonstatic_double_offset +
- (nonstatic_double_count * BytesPerLong);
- next_nonstatic_short_offset = next_nonstatic_word_offset +
- (nonstatic_word_count * BytesPerInt);
- next_nonstatic_byte_offset = next_nonstatic_short_offset +
- (nonstatic_short_count * BytesPerShort);
- next_nonstatic_type_offset = align_size_up((next_nonstatic_byte_offset +
- nonstatic_byte_count ), heapOopSize );
- orig_nonstatic_field_size = nonstatic_field_size +
- ((next_nonstatic_type_offset - first_nonstatic_field_offset)/heapOopSize);
- }
-#endif
- bool compact_fields = CompactFields;
- int allocation_style = FieldsAllocationStyle;
- if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
- assert(false, "0 <= FieldsAllocationStyle <= 2");
- allocation_style = 1; // Optimistic
- }
-
- // The next classes have predefined hard-coded fields offsets
- // (see in JavaClasses::compute_hard_coded_offsets()).
- // Use default fields allocation order for them.
- if( (allocation_style != 0 || compact_fields ) && class_loader.is_null() &&
- (class_name == vmSymbols::java_lang_AssertionStatusDirectives() ||
- class_name == vmSymbols::java_lang_Class() ||
- class_name == vmSymbols::java_lang_ClassLoader() ||
- class_name == vmSymbols::java_lang_ref_Reference() ||
- class_name == vmSymbols::java_lang_ref_SoftReference() ||
- class_name == vmSymbols::java_lang_StackTraceElement() ||
- class_name == vmSymbols::java_lang_String() ||
- class_name == vmSymbols::java_lang_Throwable() ||
- class_name == vmSymbols::java_lang_Boolean() ||
- class_name == vmSymbols::java_lang_Character() ||
- class_name == vmSymbols::java_lang_Float() ||
- class_name == vmSymbols::java_lang_Double() ||
- class_name == vmSymbols::java_lang_Byte() ||
- class_name == vmSymbols::java_lang_Short() ||
- class_name == vmSymbols::java_lang_Integer() ||
- class_name == vmSymbols::java_lang_Long())) {
- allocation_style = 0; // Allocate oops first
- compact_fields = false; // Don't compact fields
- }
-
- if( allocation_style == 0 ) {
- // Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
- next_nonstatic_oop_offset = next_nonstatic_field_offset;
- next_nonstatic_double_offset = next_nonstatic_oop_offset +
- (nonstatic_oop_count * heapOopSize);
- } else if( allocation_style == 1 ) {
- // Fields order: longs/doubles, ints, shorts/chars, bytes, oops, padded fields
- next_nonstatic_double_offset = next_nonstatic_field_offset;
- } else if( allocation_style == 2 ) {
- // Fields allocation: oops fields in super and sub classes are together.
- if( nonstatic_field_size > 0 && super_klass() != NULL &&
- super_klass->nonstatic_oop_map_size() > 0 ) {
- int map_count = super_klass->nonstatic_oop_map_count();
- OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
- OopMapBlock* last_map = first_map + map_count - 1;
- int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
- if (next_offset == next_nonstatic_field_offset) {
- allocation_style = 0; // allocate oops first
- next_nonstatic_oop_offset = next_nonstatic_field_offset;
- next_nonstatic_double_offset = next_nonstatic_oop_offset +
- (nonstatic_oop_count * heapOopSize);
- }
- }
- if( allocation_style == 2 ) {
- allocation_style = 1; // allocate oops last
- next_nonstatic_double_offset = next_nonstatic_field_offset;
- }
- } else {
- ShouldNotReachHere();
- }
-
- int nonstatic_oop_space_count = 0;
- int nonstatic_word_space_count = 0;
- int nonstatic_short_space_count = 0;
- int nonstatic_byte_space_count = 0;
- int nonstatic_oop_space_offset;
- int nonstatic_word_space_offset;
- int nonstatic_short_space_offset;
- int nonstatic_byte_space_offset;
-
- if( nonstatic_double_count > 0 ) {
- int offset = next_nonstatic_double_offset;
- next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
- if( compact_fields && offset != next_nonstatic_double_offset ) {
- // Allocate available fields into the gap before double field.
- int length = next_nonstatic_double_offset - offset;
- assert(length == BytesPerInt, "");
- nonstatic_word_space_offset = offset;
- if( nonstatic_word_count > 0 ) {
- nonstatic_word_count -= 1;
- nonstatic_word_space_count = 1; // Only one will fit
- length -= BytesPerInt;
- offset += BytesPerInt;
- }
- nonstatic_short_space_offset = offset;
- while( length >= BytesPerShort && nonstatic_short_count > 0 ) {
- nonstatic_short_count -= 1;
- nonstatic_short_space_count += 1;
- length -= BytesPerShort;
- offset += BytesPerShort;
- }
- nonstatic_byte_space_offset = offset;
- while( length > 0 && nonstatic_byte_count > 0 ) {
- nonstatic_byte_count -= 1;
- nonstatic_byte_space_count += 1;
- length -= 1;
- }
- // Allocate oop field in the gap if there are no other fields for that.
- nonstatic_oop_space_offset = offset;
- if( length >= heapOopSize && nonstatic_oop_count > 0 &&
- allocation_style != 0 ) { // when oop fields not first
- nonstatic_oop_count -= 1;
- nonstatic_oop_space_count = 1; // Only one will fit
- length -= heapOopSize;
- offset += heapOopSize;
- }
- }
- }
-
- next_nonstatic_word_offset = next_nonstatic_double_offset +
- (nonstatic_double_count * BytesPerLong);
- next_nonstatic_short_offset = next_nonstatic_word_offset +
- (nonstatic_word_count * BytesPerInt);
- next_nonstatic_byte_offset = next_nonstatic_short_offset +
- (nonstatic_short_count * BytesPerShort);
- next_nonstatic_padded_offset = next_nonstatic_byte_offset +
- nonstatic_byte_count;
-
- // let oops jump before padding with this allocation style
- if( allocation_style == 1 ) {
- next_nonstatic_oop_offset = next_nonstatic_padded_offset;
- if( nonstatic_oop_count > 0 ) {
- next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
- }
- next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
- }
-
- // Iterate over fields again and compute correct offsets.
- // The field allocation type was temporarily stored in the offset slot.
- // oop fields are located before non-oop fields (static and non-static).
- for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
-
- // skip already laid out fields
- if (fs.is_offset_set()) continue;
-
- // contended fields are handled below
- if (fs.is_contended()) continue;
-
- int real_offset;
- FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
-
- // pack the rest of the fields
- switch (atype) {
- case STATIC_OOP:
- real_offset = next_static_oop_offset;
- next_static_oop_offset += heapOopSize;
- break;
- case STATIC_BYTE:
- real_offset = next_static_byte_offset;
- next_static_byte_offset += 1;
- break;
- case STATIC_SHORT:
- real_offset = next_static_short_offset;
- next_static_short_offset += BytesPerShort;
- break;
- case STATIC_WORD:
- real_offset = next_static_word_offset;
- next_static_word_offset += BytesPerInt;
- break;
- case STATIC_DOUBLE:
- real_offset = next_static_double_offset;
- next_static_double_offset += BytesPerLong;
- break;
- case NONSTATIC_OOP:
- if( nonstatic_oop_space_count > 0 ) {
- real_offset = nonstatic_oop_space_offset;
- nonstatic_oop_space_offset += heapOopSize;
- nonstatic_oop_space_count -= 1;
- } else {
- real_offset = next_nonstatic_oop_offset;
- next_nonstatic_oop_offset += heapOopSize;
- }
- // Update oop maps
- if( nonstatic_oop_map_count > 0 &&
- nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
- real_offset -
- int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
- heapOopSize ) {
- // Extend current oop map
- nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
- } else {
- // Create new oop map
- nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
- nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
- nonstatic_oop_map_count += 1;
- if( first_nonstatic_oop_offset == 0 ) { // Undefined
- first_nonstatic_oop_offset = real_offset;
- }
- }
- break;
- case NONSTATIC_BYTE:
- if( nonstatic_byte_space_count > 0 ) {
- real_offset = nonstatic_byte_space_offset;
- nonstatic_byte_space_offset += 1;
- nonstatic_byte_space_count -= 1;
- } else {
- real_offset = next_nonstatic_byte_offset;
- next_nonstatic_byte_offset += 1;
- }
- break;
- case NONSTATIC_SHORT:
- if( nonstatic_short_space_count > 0 ) {
- real_offset = nonstatic_short_space_offset;
- nonstatic_short_space_offset += BytesPerShort;
- nonstatic_short_space_count -= 1;
- } else {
- real_offset = next_nonstatic_short_offset;
- next_nonstatic_short_offset += BytesPerShort;
- }
- break;
- case NONSTATIC_WORD:
- if( nonstatic_word_space_count > 0 ) {
- real_offset = nonstatic_word_space_offset;
- nonstatic_word_space_offset += BytesPerInt;
- nonstatic_word_space_count -= 1;
- } else {
- real_offset = next_nonstatic_word_offset;
- next_nonstatic_word_offset += BytesPerInt;
- }
- break;
- case NONSTATIC_DOUBLE:
- real_offset = next_nonstatic_double_offset;
- next_nonstatic_double_offset += BytesPerLong;
- break;
- default:
- ShouldNotReachHere();
- }
- fs.set_offset(real_offset);
- }
-
-
- // Handle the contended cases.
- //
- // Each contended field should not intersect the cache line with another contended field.
- // In the absence of alignment information, we end up with pessimistically separating
- // the fields with full-width padding.
- //
- // Additionally, this should not break alignment for the fields, so we round the alignment up
- // for each field.
- if (contended_count > 0) {
-
- // if there is at least one contended field, we need to have pre-padding for them
- if (nonstatic_contended_count > 0) {
- next_nonstatic_padded_offset += pad_size;
- }
-
- // collect all contended groups
- BitMap bm(cp->size());
- for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
- // skip already laid out fields
- if (fs.is_offset_set()) continue;
-
- if (fs.is_contended()) {
- bm.set_bit(fs.contended_group());
- }
- }
-
- int current_group = -1;
- while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
-
- for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
-
- // skip already laid out fields
- if (fs.is_offset_set()) continue;
-
- // skip non-contended fields and fields from different group
- if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
-
- // handle statics below
- if (fs.access_flags().is_static()) continue;
-
- int real_offset;
- FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
-
- switch (atype) {
- case NONSTATIC_BYTE:
- next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1);
- real_offset = next_nonstatic_padded_offset;
- next_nonstatic_padded_offset += 1;
- break;
-
- case NONSTATIC_SHORT:
- next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort);
- real_offset = next_nonstatic_padded_offset;
- next_nonstatic_padded_offset += BytesPerShort;
- break;
-
- case NONSTATIC_WORD:
- next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt);
- real_offset = next_nonstatic_padded_offset;
- next_nonstatic_padded_offset += BytesPerInt;
- break;
-
- case NONSTATIC_DOUBLE:
- next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong);
- real_offset = next_nonstatic_padded_offset;
- next_nonstatic_padded_offset += BytesPerLong;
- break;
-
- case NONSTATIC_OOP:
- next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize);
- real_offset = next_nonstatic_padded_offset;
- next_nonstatic_padded_offset += heapOopSize;
-
- // Create new oop map
- nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
- nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
- nonstatic_oop_map_count += 1;
- if( first_nonstatic_oop_offset == 0 ) { // Undefined
- first_nonstatic_oop_offset = real_offset;
- }
- break;
-
- default:
- ShouldNotReachHere();
- }
-
- if (fs.contended_group() == 0) {
- // Contended group defines the equivalence class over the fields:
- // the fields within the same contended group are not inter-padded.
- // The only exception is default group, which does not incur the
- // equivalence, and so requires intra-padding.
- next_nonstatic_padded_offset += pad_size;
- }
-
- fs.set_offset(real_offset);
- } // for
-
- // Start laying out the next group.
- // Note that this will effectively pad the last group in the back;
- // this is expected to alleviate memory contention effects for
- // subclass fields and/or adjacent object.
- // If this was the default group, the padding is already in place.
- if (current_group != 0) {
- next_nonstatic_padded_offset += pad_size;
- }
- }
-
- // handle static fields
-
- // if there is at least one contended field, we need to have pre-padding for them
- if (static_contended_count > 0) {
- next_static_padded_offset += pad_size;
- }
-
- current_group = -1;
- while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
-
- for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
-
- // skip already laid out fields
- if (fs.is_offset_set()) continue;
-
- // skip non-contended fields and fields from different group
- if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
-
- // non-statics already handled above
- if (!fs.access_flags().is_static()) continue;
-
- int real_offset;
- FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
-
- switch (atype) {
-
- case STATIC_BYTE:
- next_static_padded_offset = align_size_up(next_static_padded_offset, 1);
- real_offset = next_static_padded_offset;
- next_static_padded_offset += 1;
- break;
-
- case STATIC_SHORT:
- next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerShort);
- real_offset = next_static_padded_offset;
- next_static_padded_offset += BytesPerShort;
- break;
-
- case STATIC_WORD:
- next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerInt);
- real_offset = next_static_padded_offset;
- next_static_padded_offset += BytesPerInt;
- break;
-
- case STATIC_DOUBLE:
- next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerLong);
- real_offset = next_static_padded_offset;
- next_static_padded_offset += BytesPerLong;
- break;
-
- case STATIC_OOP:
- next_static_padded_offset = align_size_up(next_static_padded_offset, heapOopSize);
- real_offset = next_static_padded_offset;
- next_static_padded_offset += heapOopSize;
- break;
-
- default:
- ShouldNotReachHere();
- }
-
- if (fs.contended_group() == 0) {
- // Contended group defines the equivalence class over the fields:
- // the fields within the same contended group are not inter-padded.
- // The only exception is default group, which does not incur the
- // equivalence, and so requires intra-padding.
- next_static_padded_offset += pad_size;
- }
-
- fs.set_offset(real_offset);
- } // for
-
- // Start laying out the next group.
- // Note that this will effectively pad the last group in the back;
- // this is expected to alleviate memory contention effects for
- // subclass fields and/or adjacent object.
- // If this was the default group, the padding is already in place.
- if (current_group != 0) {
- next_static_padded_offset += pad_size;
- }
-
- }
-
- } // handle contended
-
- // Size of instances
- int instance_size;
-
- int notaligned_offset = next_nonstatic_padded_offset;
-
- // Entire class is contended, pad in the back.
- // This helps to alleviate memory contention effects for subclass fields
- // and/or adjacent object.
- if (parsed_annotations.is_contended()) {
- notaligned_offset += pad_size;
- next_static_padded_offset += pad_size;
- }
-
- int next_static_type_offset = align_size_up(next_static_padded_offset, wordSize);
- int static_field_size = (next_static_type_offset -
- InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
-
- next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
- nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
- - first_nonstatic_field_offset)/heapOopSize);
-
- next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
- instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
-
- assert(instance_size == align_object_size(align_size_up(
- (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize + ((parsed_annotations.is_contended()) ? pad_size : 0)),
- wordSize) / wordSize), "consistent layout helper value");
-
- // Number of non-static oop map blocks allocated at end of klass.
- const unsigned int total_oop_map_count =
- compute_oop_map_count(super_klass, nonstatic_oop_map_count,
- first_nonstatic_oop_offset);
+ itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(_transitive_interfaces);
+
+ FieldLayoutInfo info;
+ layout_fields(class_loader, &fac, &parsed_annotations, &info, CHECK_NULL);
+
+ int total_oop_map_size2 =
+ InstanceKlass::nonstatic_oop_map_size(info.total_oop_map_count);
+
// Compute reference type
ReferenceType rt;
if (super_klass() == NULL) {
@@ -4066,53 +3958,42 @@
}
// We can now create the basic Klass* for this klass
- int total_oop_map_size2 =
- InstanceKlass::nonstatic_oop_map_size(total_oop_map_count);
-
- Klass* ik = InstanceKlass::allocate_instance_klass(loader_data,
- vtable_size,
- itable_size,
- static_field_size,
- total_oop_map_size2,
- rt,
- access_flags,
- name,
- super_klass(),
- !host_klass.is_null(),
- CHECK_(nullHandle));
-
- // Add all classes to our internal class loader list here,
- // including classes in the bootstrap (NULL) class loader.
- loader_data->add_class(ik);
-
- instanceKlassHandle this_klass (THREAD, ik);
-
- assert(this_klass->static_field_size() == static_field_size, "sanity");
- assert(this_klass->nonstatic_oop_map_count() == total_oop_map_count,
+ _klass = InstanceKlass::allocate_instance_klass(loader_data,
+ vtable_size,
+ itable_size,
+ info.static_field_size,
+ total_oop_map_size2,
+ rt,
+ access_flags,
+ name,
+ super_klass(),
+ !host_klass.is_null(),
+ CHECK_(nullHandle));
+ instanceKlassHandle this_klass (THREAD, _klass);
+
+ assert(this_klass->static_field_size() == info.static_field_size, "sanity");
+ assert(this_klass->nonstatic_oop_map_count() == info.total_oop_map_count,
"sanity");
// Fill in information already parsed
this_klass->set_should_verify_class(verify);
- jint lh = Klass::instance_layout_helper(instance_size, false);
+ jint lh = Klass::instance_layout_helper(info.instance_size, false);
this_klass->set_layout_helper(lh);
assert(this_klass->oop_is_instance(), "layout is correct");
- assert(this_klass->size_helper() == instance_size, "correct size_helper");
+ assert(this_klass->size_helper() == info.instance_size, "correct size_helper");
// Not yet: supers are done below to support the new subtype-checking fields
//this_klass->set_super(super_klass());
this_klass->set_class_loader_data(loader_data);
- this_klass->set_nonstatic_field_size(nonstatic_field_size);
- this_klass->set_has_nonstatic_fields(has_nonstatic_fields);
+ this_klass->set_nonstatic_field_size(info.nonstatic_field_size);
+ this_klass->set_has_nonstatic_fields(info.has_nonstatic_fields);
this_klass->set_static_oop_field_count(fac.count[STATIC_OOP]);
- cp->set_pool_holder(this_klass());
- error_handler.set_in_error(false); // turn off error handler for cp
- this_klass->set_constants(cp());
- this_klass->set_local_interfaces(local_interfaces);
- this_klass->set_fields(fields, java_fields_count);
- this_klass->set_methods(methods);
+
+ apply_parsed_class_metadata(this_klass, java_fields_count, CHECK_NULL);
+
if (has_final_method) {
this_klass->set_has_final_method();
}
- this_klass->set_method_ordering(method_ordering);
+ this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
// The InstanceKlass::_methods_jmethod_ids cache and the
// InstanceKlass::_methods_cached_itable_indices cache are
// both managed on the assumption that the initial cache
@@ -4124,17 +4005,6 @@
if (is_anonymous()) // I am well known to myself
cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve
- // Assign allocations if needed
- if (_annotations != NULL || _type_annotations != NULL ||
- fields_annotations != NULL || fields_type_annotations != NULL) {
- Annotations* annotations = Annotations::allocate(loader_data, CHECK_NULL);
- annotations->set_class_annotations(_annotations);
- annotations->set_class_type_annotations(_type_annotations);
- annotations->set_fields_annotations(fields_annotations);
- annotations->set_fields_type_annotations(fields_type_annotations);
- this_klass->set_annotations(annotations);
- }
-
this_klass->set_minor_version(minor_version);
this_klass->set_major_version(major_version);
this_klass->set_has_default_methods(has_default_methods);
@@ -4169,8 +4039,6 @@
this_klass->set_has_miranda_methods(); // then set a flag
}
- this_klass->set_transitive_interfaces(transitive_interfaces);
-
// Fill in information needed to compute superclasses.
this_klass->initialize_supers(super_klass(), CHECK_(nullHandle));
@@ -4179,7 +4047,7 @@
// Compute transitive closure of interfaces this class implements
// Do final class setup
- fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);
+ fill_oop_maps(this_klass, info.nonstatic_oop_map_count, info.nonstatic_oop_offsets, info.nonstatic_oop_counts);
// Fill in has_finalizer, has_vanilla_constructor, and layout_helper
set_precomputed_flags(this_klass);
@@ -4278,35 +4146,6 @@
}
}
-#ifndef PRODUCT
- if( PrintCompactFieldsSavings ) {
- ResourceMark rm;
- if( nonstatic_field_size < orig_nonstatic_field_size ) {
- tty->print("[Saved %d of %d bytes in %s]\n",
- (orig_nonstatic_field_size - nonstatic_field_size)*heapOopSize,
- orig_nonstatic_field_size*heapOopSize,
- this_klass->external_name());
- } else if( nonstatic_field_size > orig_nonstatic_field_size ) {
- tty->print("[Wasted %d over %d bytes in %s]\n",
- (nonstatic_field_size - orig_nonstatic_field_size)*heapOopSize,
- orig_nonstatic_field_size*heapOopSize,
- this_klass->external_name());
- }
- }
-#endif
-
-#ifndef PRODUCT
- if (PrintFieldLayout) {
- print_field_layout(name,
- fields,
- cp,
- instance_size,
- first_nonstatic_field_offset,
- next_nonstatic_field_offset,
- next_static_type_offset);
- }
-#endif
-
// preserve result across HandleMark
preserve_this_klass = this_klass();
}
@@ -4316,9 +4155,40 @@
instanceKlassHandle this_klass (THREAD, preserve_this_klass);
debug_only(this_klass->verify();)
+  // No error has occurred, so clear _klass to keep the destructor from deallocating it
+ _klass = NULL;
return this_klass;
}
+// Destructor to clean up if there's an error
+ClassFileParser::~ClassFileParser() {
+ MetadataFactory::free_metadata(_loader_data, _cp);
+ MetadataFactory::free_array<u2>(_loader_data, _fields);
+
+ // Free methods
+ InstanceKlass::deallocate_methods(_loader_data, _methods);
+
+  // beware of the shared Universe::the_empty_*_array sentinels -- they must not be freed!
+ if (_inner_classes != Universe::the_empty_short_array()) {
+ MetadataFactory::free_array<u2>(_loader_data, _inner_classes);
+ }
+
+ // Free interfaces
+ InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
+ _local_interfaces, _transitive_interfaces);
+
+ MetadataFactory::free_array<u1>(_loader_data, _annotations);
+ MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
+ Annotations::free_contents(_loader_data, _fields_annotations);
+ Annotations::free_contents(_loader_data, _fields_type_annotations);
+
+ clear_class_metadata();
+
+ // deallocate the klass if already created.
+ MetadataFactory::free_metadata(_loader_data, _klass);
+ _klass = NULL;
+}
+
void ClassFileParser::print_field_layout(Symbol* name,
Array<u2>* fields,
constantPoolHandle cp,
@@ -4510,7 +4380,7 @@
}
}
-// utility method for appending and array with check for duplicates
+// utility methods for appending an array with a check for duplicates
void append_interfaces(GrowableArray<Klass*>* result, Array<Klass*>* ifs) {
// iterate over new interfaces
@@ -4522,8 +4392,9 @@
}
}
-
-Array<Klass*>* ClassFileParser::compute_transitive_interfaces(ClassLoaderData* loader_data, instanceKlassHandle super, Array<Klass*>* local_ifs, TRAPS) {
+Array<Klass*>* ClassFileParser::compute_transitive_interfaces(
+ instanceKlassHandle super,
+ Array<Klass*>* local_ifs, TRAPS) {
// Compute maximum size for transitive interfaces
int max_transitive_size = 0;
int super_size = 0;
@@ -4570,7 +4441,7 @@
// length will be less than the max_transitive_size if duplicates were removed
int length = result->length();
assert(length <= max_transitive_size, "just checking");
- Array<Klass*>* new_result = MetadataFactory::new_array<Klass*>(loader_data, length, CHECK_NULL);
+ Array<Klass*>* new_result = MetadataFactory::new_array<Klass*>(_loader_data, length, CHECK_NULL);
for (int i = 0; i < length; i++) {
Klass* e = result->at(i);
assert(e != NULL, "just checking");
@@ -4580,7 +4451,6 @@
}
}
-
void ClassFileParser::check_super_class_access(instanceKlassHandle this_klass, TRAPS) {
Klass* super = this_klass->super();
if ((super != NULL) &&
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -34,6 +34,7 @@
#include "classfile/symbolTable.hpp"
class FieldAllocationCount;
+class FieldLayoutInfo;
// Parser for .class files
@@ -47,6 +48,7 @@
u2 _major_version;
u2 _minor_version;
Symbol* _class_name;
+ ClassLoaderData* _loader_data;
KlassHandle _host_klass;
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
@@ -58,33 +60,59 @@
// class attributes parsed before the instance klass is created:
bool _synthetic_flag;
+ int _sde_length;
+ char* _sde_buffer;
Symbol* _sourcefile;
Symbol* _generic_signature;
- char* _sde_buffer;
- int _sde_length;
- Array<u2>* _inner_classes;
+
+  // Metadata created before the instance klass is created.  These must be deallocated
+  // if they are not transferred to the InstanceKlass upon successful class loading,
+  // in which case these pointers have been set to NULL.
+ instanceKlassHandle _super_klass;
+ ConstantPool* _cp;
+ Array<u2>* _fields;
+ Array<Method*>* _methods;
+ Array<u2>* _inner_classes;
+ Array<Klass*>* _local_interfaces;
+ Array<Klass*>* _transitive_interfaces;
AnnotationArray* _annotations;
AnnotationArray* _type_annotations;
+ Array<AnnotationArray*>* _fields_annotations;
+ Array<AnnotationArray*>* _fields_type_annotations;
+ InstanceKlass* _klass; // InstanceKlass once created.
void set_class_synthetic_flag(bool x) { _synthetic_flag = x; }
void set_class_sourcefile(Symbol* x) { _sourcefile = x; }
void set_class_generic_signature(Symbol* x) { _generic_signature = x; }
void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
- void set_class_inner_classes(Array<u2>* x) { _inner_classes = x; }
- void set_class_annotations(AnnotationArray* x) { _annotations = x; }
- void set_class_type_annotations(AnnotationArray* x) { _type_annotations = x; }
- void init_parsed_class_attributes() {
+
+ void init_parsed_class_attributes(ClassLoaderData* loader_data) {
+ _loader_data = loader_data;
_synthetic_flag = false;
_sourcefile = NULL;
_generic_signature = NULL;
_sde_buffer = NULL;
_sde_length = 0;
- _annotations = _type_annotations = NULL;
// initialize the other flags too:
_has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
_max_bootstrap_specifier_index = -1;
+ clear_class_metadata();
+ _klass = NULL;
}
void apply_parsed_class_attributes(instanceKlassHandle k); // update k
+ void apply_parsed_class_metadata(instanceKlassHandle k, int fields_count, TRAPS);
+ void clear_class_metadata() {
+ // metadata created before the instance klass is created. Must be
+ // deallocated if classfile parsing returns an error.
+ _cp = NULL;
+ _fields = NULL;
+ _methods = NULL;
+ _inner_classes = NULL;
+ _local_interfaces = NULL;
+ _transitive_interfaces = NULL;
+ _annotations = _type_annotations = NULL;
+ _fields_annotations = _fields_type_annotations = NULL;
+ }
class AnnotationCollector {
public:
@@ -124,11 +152,27 @@
void set_contended(bool contended) { set_annotation(_sun_misc_Contended); }
bool is_contended() { return has_annotation(_sun_misc_Contended); }
};
+
+ // This class also doubles as a holder for metadata cleanup.
class FieldAnnotationCollector: public AnnotationCollector {
+ ClassLoaderData* _loader_data;
+ AnnotationArray* _field_annotations;
+ AnnotationArray* _field_type_annotations;
public:
- FieldAnnotationCollector() : AnnotationCollector(_in_field) { }
+ FieldAnnotationCollector(ClassLoaderData* loader_data) :
+ AnnotationCollector(_in_field),
+ _loader_data(loader_data),
+ _field_annotations(NULL),
+ _field_type_annotations(NULL) {}
void apply_to(FieldInfo* f);
+ ~FieldAnnotationCollector();
+ AnnotationArray* field_annotations() { return _field_annotations; }
+ AnnotationArray* field_type_annotations() { return _field_type_annotations; }
+
+ void set_field_annotations(AnnotationArray* a) { _field_annotations = a; }
+ void set_field_type_annotations(AnnotationArray* a) { _field_type_annotations = a; }
};
+
class MethodAnnotationCollector: public AnnotationCollector {
public:
MethodAnnotationCollector() : AnnotationCollector(_in_method) { }
@@ -152,38 +196,30 @@
void set_stream(ClassFileStream* st) { _stream = st; }
// Constant pool parsing
- void parse_constant_pool_entries(ClassLoaderData* loader_data,
- constantPoolHandle cp, int length, TRAPS);
+ void parse_constant_pool_entries(int length, TRAPS);
- constantPoolHandle parse_constant_pool(ClassLoaderData* loader_data, TRAPS);
+ constantPoolHandle parse_constant_pool(TRAPS);
// Interface parsing
- Array<Klass*>* parse_interfaces(constantPoolHandle cp,
- int length,
- ClassLoaderData* loader_data,
+ Array<Klass*>* parse_interfaces(int length,
Handle protection_domain,
Symbol* class_name,
bool* has_default_methods,
TRAPS);
void record_defined_class_dependencies(instanceKlassHandle defined_klass, TRAPS);
+ instanceKlassHandle parse_super_class(int super_class_index, TRAPS);
// Field parsing
- void parse_field_attributes(ClassLoaderData* loader_data,
- constantPoolHandle cp, u2 attributes_count,
+ void parse_field_attributes(u2 attributes_count,
bool is_static, u2 signature_index,
u2* constantvalue_index_addr,
bool* is_synthetic_addr,
u2* generic_signature_index_addr,
- AnnotationArray** field_annotations,
- AnnotationArray** field_type_annotations,
FieldAnnotationCollector* parsed_annotations,
TRAPS);
- Array<u2>* parse_fields(ClassLoaderData* loader_data,
- Symbol* class_name,
- constantPoolHandle cp, bool is_interface,
+ Array<u2>* parse_fields(Symbol* class_name,
+ bool is_interface,
FieldAllocationCount *fac,
- Array<AnnotationArray*>** fields_annotations,
- Array<AnnotationArray*>** fields_type_annotations,
u2* java_fields_count_ptr, TRAPS);
void print_field_layout(Symbol* name,
@@ -195,65 +231,52 @@
int static_fields_end);
// Method parsing
- methodHandle parse_method(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- bool is_interface,
+ methodHandle parse_method(bool is_interface,
AccessFlags* promoted_flags,
TRAPS);
- Array<Method*>* parse_methods(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- bool is_interface,
+ Array<Method*>* parse_methods(bool is_interface,
AccessFlags* promoted_flags,
bool* has_final_method,
bool* has_default_method,
TRAPS);
- Array<int>* sort_methods(ClassLoaderData* loader_data,
- Array<Method*>* methods,
- TRAPS);
- u2* parse_exception_table(ClassLoaderData* loader_data,
- u4 code_length, u4 exception_table_length,
- constantPoolHandle cp, TRAPS);
+ intArray* sort_methods(Array<Method*>* methods);
+
+ u2* parse_exception_table(u4 code_length, u4 exception_table_length,
+ TRAPS);
void parse_linenumber_table(
u4 code_attribute_length, u4 code_length,
CompressedLineNumberWriteStream** write_stream, TRAPS);
u2* parse_localvariable_table(u4 code_length, u2 max_locals, u4 code_attribute_length,
- constantPoolHandle cp, u2* localvariable_table_length,
+ u2* localvariable_table_length,
bool isLVTT, TRAPS);
u2* parse_checked_exceptions(u2* checked_exceptions_length, u4 method_attribute_length,
- constantPoolHandle cp, TRAPS);
+ TRAPS);
void parse_type_array(u2 array_length, u4 code_length, u4* u1_index, u4* u2_index,
- u1* u1_array, u2* u2_array, constantPoolHandle cp, TRAPS);
- Array<u1>* parse_stackmap_table(ClassLoaderData* loader_data, u4 code_attribute_length, TRAPS);
+ u1* u1_array, u2* u2_array, TRAPS);
+ u1* parse_stackmap_table(u4 code_attribute_length, TRAPS);
// Classfile attribute parsing
- void parse_classfile_sourcefile_attribute(constantPoolHandle cp, TRAPS);
- void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
- int length, TRAPS);
- u2 parse_classfile_inner_classes_attribute(ClassLoaderData* loader_data,
- u1* inner_classes_attribute_start,
+ void parse_classfile_sourcefile_attribute(TRAPS);
+ void parse_classfile_source_debug_extension_attribute(int length, TRAPS);
+ u2 parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
bool parsed_enclosingmethod_attribute,
u2 enclosing_method_class_index,
u2 enclosing_method_method_index,
- constantPoolHandle cp,
TRAPS);
- void parse_classfile_attributes(ClassLoaderData* loader_data,
- constantPoolHandle cp,
- ClassAnnotationCollector* parsed_annotations,
+ void parse_classfile_attributes(ClassAnnotationCollector* parsed_annotations,
TRAPS);
- void parse_classfile_synthetic_attribute(constantPoolHandle cp, TRAPS);
- void parse_classfile_signature_attribute(constantPoolHandle cp, TRAPS);
- void parse_classfile_bootstrap_methods_attribute(ClassLoaderData* loader_data, constantPoolHandle cp, u4 attribute_length, TRAPS);
+ void parse_classfile_synthetic_attribute(TRAPS);
+ void parse_classfile_signature_attribute(TRAPS);
+ void parse_classfile_bootstrap_methods_attribute(u4 attribute_length, TRAPS);
// Annotations handling
- AnnotationArray* assemble_annotations(ClassLoaderData* loader_data,
- u1* runtime_visible_annotations,
+ AnnotationArray* assemble_annotations(u1* runtime_visible_annotations,
int runtime_visible_annotations_length,
u1* runtime_invisible_annotations,
int runtime_invisible_annotations_length, TRAPS);
int skip_annotation(u1* buffer, int limit, int index);
int skip_annotation_value(u1* buffer, int limit, int index);
- void parse_annotations(ClassLoaderData* loader_data,
- u1* buffer, int limit, constantPoolHandle cp,
+ void parse_annotations(u1* buffer, int limit,
/* Results (currently, only one result is supported): */
AnnotationCollector* result,
TRAPS);
@@ -267,8 +290,7 @@
int* nonstatic_oop_offsets,
unsigned int* nonstatic_oop_counts);
void set_precomputed_flags(instanceKlassHandle k);
- Array<Klass*>* compute_transitive_interfaces(ClassLoaderData* loader_data,
- instanceKlassHandle super,
+ Array<Klass*>* compute_transitive_interfaces(instanceKlassHandle super,
Array<Klass*>* local_ifs, TRAPS);
// Format checker methods
@@ -318,7 +340,7 @@
bool is_supported_version(u2 major, u2 minor);
bool has_illegal_visibility(jint flags);
- void verify_constantvalue(int constantvalue_index, int signature_index, constantPoolHandle cp, TRAPS);
+ void verify_constantvalue(int constantvalue_index, int signature_index, TRAPS);
void verify_legal_utf8(const unsigned char* buffer, int length, TRAPS);
void verify_legal_class_name(Symbol* name, TRAPS);
void verify_legal_field_name(Symbol* name, TRAPS);
@@ -359,10 +381,17 @@
// In older versions of the VM, Klass*s cannot sneak into early phases of
// constant pool construction, but in later versions they can.
// %%% Let's phase out the old is_klass_reference.
- bool is_klass_reference(constantPoolHandle cp, int index) {
- return (EnableInvokeDynamic
- ? cp->tag_at(index).is_klass_or_reference()
- : cp->tag_at(index).is_klass_reference());
+ bool valid_klass_reference_at(int index) {
+ return _cp->is_within_bounds(index) &&
+ (EnableInvokeDynamic
+ ? _cp->tag_at(index).is_klass_or_reference()
+ : _cp->tag_at(index).is_klass_reference());
+ }
+
+ // Checks that the cpool index is in range and is a utf8
+ bool valid_symbol_at(int cpool_index) {
+ return (_cp->is_within_bounds(cpool_index) &&
+ _cp->tag_at(cpool_index).is_utf8());
}
void copy_localvariable_table(ConstMethod* cm, int lvt_cnt,
@@ -373,8 +402,7 @@
u2** localvariable_type_table_start,
TRAPS);
- void copy_method_annotations(ClassLoaderData* loader_data,
- ConstMethod* cm,
+ void copy_method_annotations(ConstMethod* cm,
u1* runtime_visible_annotations,
int runtime_visible_annotations_length,
u1* runtime_invisible_annotations,
@@ -391,9 +419,15 @@
int annotation_default_length,
TRAPS);
+  // lays out the fields in the class and records the layout, including the total oop map count, in FieldLayoutInfo
+ void layout_fields(Handle class_loader, FieldAllocationCount* fac,
+ ClassAnnotationCollector* parsed_annotations,
+ FieldLayoutInfo* info, TRAPS);
+
public:
// Constructor
ClassFileParser(ClassFileStream* st) { set_stream(st); }
+ ~ClassFileParser();
// Parse .class file and return new Klass*. The Klass* is not hooked up
// to the system dictionary or any other structures, so a .class file can
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,18 +49,17 @@
Symbol* sym;
- if (c_heap) {
+ if (DumpSharedSpaces) {
+ // Allocate all symbols to CLD shared metaspace
+ sym = new (len, ClassLoaderData::the_null_class_loader_data(), THREAD) Symbol(name, len, -1);
+ } else if (c_heap) {
// refcount starts as 1
- assert(!DumpSharedSpaces, "never allocate to C heap");
sym = new (len, THREAD) Symbol(name, len, 1);
assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
} else {
- if (DumpSharedSpaces) {
- sym = new (len, ClassLoaderData::the_null_class_loader_data(), THREAD) Symbol(name, len, -1);
- } else {
+ // Allocate to global arena
sym = new (len, arena(), THREAD) Symbol(name, len, -1);
}
- }
return sym;
}
--- a/hotspot/src/share/vm/code/compressedStream.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/code/compressedStream.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -195,7 +195,7 @@
// for this block (a matching directive turns it back on later).
// These directives can be removed once the MS VS.NET 2005
// compiler stack overflow is fixed.
-#if _MSC_VER >=1400 && !defined(_WIN64)
+#if defined(_MSC_VER) && _MSC_VER >= 1400 && !defined(_WIN64)
#pragma optimize("", off)
#pragma warning(disable: 4748)
#endif
@@ -276,7 +276,7 @@
guarantee(fails == 0, "test failures");
}
-#if _MSC_VER >=1400 && !defined(_WIN64)
+#if defined(_MSC_VER) && _MSC_VER >= 1400 && !defined(_WIN64)
#pragma warning(default: 4748)
#pragma optimize("", on)
#endif
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -505,7 +505,7 @@
ResourceMark rm(thread);
// <task id='9' method='M' osr_bci='X' level='1' blocking='1' stamp='1.234'>
- if (_compile_id != 0) log->print(" compile_id='%d'", _compile_id);
+ log->print(" compile_id='%d'", _compile_id);
if (_osr_bci != CompileBroker::standard_entry_bci) {
log->print(" compile_kind='osr'"); // same as nmethod::compile_kind
} // else compile_kind='c2c'
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -2063,11 +2063,6 @@
// required.
_collectorState = FinalMarking;
}
- if (PrintGCDetails &&
- (_collectorState > Idling ||
- !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
- gclog_or_tty->print(" (concurrent mode failure)");
- }
collect_in_foreground(clear_all_soft_refs);
// For a mark-sweep, compute_new_size() will be called
@@ -3400,10 +3395,10 @@
if (PrintCMSStatistics != 0) {
_collector->resetYields();
}
- if (PrintGCDetails && PrintGCTimeStamps) {
+ if (PrintGCDetails) {
gclog_or_tty->date_stamp(PrintGCDateStamps);
- gclog_or_tty->stamp();
- gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
+ gclog_or_tty->stamp(PrintGCTimeStamps);
+ gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
_collector->cmsGen()->short_name(), _phase);
}
_collector->resetTimer();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1310,11 +1310,6 @@
_markStack.expand();
}
-#if VERIFY_OBJS_PROCESSED
- _scan_obj_cl.objs_processed = 0;
- ThreadLocalObjQueue::objs_enqueued = 0;
-#endif
-
// Statistics
double now = os::elapsedTime();
_remark_mark_times.add((mark_work_end - start) * 1000.0);
@@ -2555,17 +2550,6 @@
guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
print_stats();
-
-#if VERIFY_OBJS_PROCESSED
- if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
- gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
- _scan_obj_cl.objs_processed,
- ThreadLocalObjQueue::objs_enqueued);
- guarantee(_scan_obj_cl.objs_processed ==
- ThreadLocalObjQueue::objs_enqueued,
- "Different number of objs processed and enqueued.");
- }
-#endif
}
#ifndef PRODUCT
@@ -4111,7 +4095,7 @@
// bitmap knows by how much we need to move it as it knows its
// granularity).
assert(_finger < _region_limit, "invariant");
- HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
+ HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
// Check if bitmap iteration was aborted while scanning the last object
if (new_finger >= _region_limit) {
giveup_current_region();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -97,7 +97,6 @@
HeapWord* limit = NULL) const;
// conversion utilities
- // XXX Fix these so that offsets are size_t's...
HeapWord* offsetToHeapWord(size_t offset) const {
return _bmStartWord + (offset << _shifter);
}
@@ -105,8 +104,13 @@
return pointer_delta(addr, _bmStartWord) >> _shifter;
}
int heapWordDiffToOffsetDiff(size_t diff) const;
- HeapWord* nextWord(HeapWord* addr) {
- return offsetToHeapWord(heapWordToOffset(addr) + 1);
+
+ // The argument addr should be the start address of a valid object
+ HeapWord* nextObject(HeapWord* addr) {
+ oop obj = (oop) addr;
+ HeapWord* res = addr + obj->size();
+ assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
+ return res;
}
// debugging
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -252,12 +252,10 @@
start_offset = _bm.get_next_one_offset(start_offset, end_offset);
while (start_offset < end_offset) {
- HeapWord* obj_addr = offsetToHeapWord(start_offset);
- oop obj = (oop) obj_addr;
if (!cl->do_bit(start_offset)) {
return false;
}
- HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
+ HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr);
BitMap::idx_t next_offset = heapWordToOffset(next_addr);
start_offset = _bm.get_next_one_offset(next_offset, end_offset);
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -409,7 +409,7 @@
// heap remains parsable.
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
- assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -948,6 +948,8 @@
break;
}
if (e != scan_end) {
+ assert(e < scan_end, err_msg("e: " PTR_FORMAT " scan_end: " PTR_FORMAT, e, scan_end));
+
if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
&& page_expected.size != 0) {
os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -186,7 +186,8 @@
int caller_actual_parameters,
int callee_params,
int callee_locals,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
return layout_activation(method,
temps,
popframe_args,
@@ -196,7 +197,8 @@
callee_locals,
(frame*)NULL,
(frame*)NULL,
- is_top_frame);
+ is_top_frame,
+ is_bottom_frame);
}
static int layout_activation(Method* method,
@@ -208,7 +210,8 @@
int callee_locals,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame);
+ bool is_top_frame,
+ bool is_bottom_frame);
// Runtime support
static bool is_not_reached( methodHandle method, int bci);
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -803,7 +803,7 @@
if (!direct_calling_default_method &&
check_access &&
// a) check if ACC_SUPER flag is set for the current class
- current_klass->is_super() &&
+ (current_klass->is_super() || !AllowNonVirtualCalls) &&
// b) check if the method class is a superclass of the current class (superclass relation is not reflexive!)
current_klass->is_subtype_of(method_klass()) &&
current_klass() != method_klass() &&
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -84,15 +84,13 @@
const int length = _cp_cache_map.length();
ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
ConstantPoolCache* cache =
- ConstantPoolCache::allocate(loader_data, length, CHECK);
+ ConstantPoolCache::allocate(loader_data, length, _cp_cache_map,
+ _invokedynamic_references_map, CHECK);
// initialize object cache in constant pool
_pool->initialize_resolved_references(loader_data, _resolved_references_map,
_resolved_reference_limit,
CHECK);
-
- No_Safepoint_Verifier nsv;
- cache->initialize(_cp_cache_map, _invokedynamic_references_map);
_pool->set_cache(cache);
cache->set_constant_pool(_pool());
}
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -620,7 +620,7 @@
const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
const bool softrefs_clear = all_soft_refs_clear();
- assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+
if (limit_exceeded && softrefs_clear) {
*gc_overhead_limit_was_exceeded = true;
size_policy()->set_gc_overhead_limit_exceeded(false);
--- a/hotspot/src/share/vm/memory/filemap.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/memory/filemap.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -112,12 +112,19 @@
char* region_base(int i) { return _header._space[i]._base; }
struct FileMapHeader* header() { return &_header; }
- static void set_current_info(FileMapInfo* info) { _current_info = info; }
- static FileMapInfo* current_info() { return _current_info; }
+ static void set_current_info(FileMapInfo* info) {
+ CDS_ONLY(_current_info = info;)
+ }
+
+ static FileMapInfo* current_info() {
+ CDS_ONLY(return _current_info;)
+ NOT_CDS(return NULL;)
+ }
+
static void assert_mark(bool check);
// File manipulation.
- bool initialize();
+ bool initialize() NOT_CDS_RETURN_(false);
bool open_for_read();
void open_for_write();
void write_header();
@@ -141,7 +148,7 @@
void fail_continue(const char *msg, ...);
// Return true if given address is in the mapped shared space.
- bool is_in_shared_space(const void* p);
+ bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
};
#endif // SHARE_VM_MEMORY_FILEMAP_HPP
--- a/hotspot/src/share/vm/memory/metablock.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/memory/metablock.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,6 @@
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 64 * K;
-const size_t metadata_deallocate = 0xf5f5f5f5;
MetaWord* last_allocated = 0;
@@ -2440,7 +2439,8 @@
free_chunks_capacity_bytes / K,
used_and_free / K,
capacity_bytes / K);
- assert(used_and_free == capacity_bytes, "Accounting is wrong");
+ // Accounting can only be correct if we got the values during a safepoint
+ assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}
// Print total fragmentation for class and data metaspaces separately
--- a/hotspot/src/share/vm/memory/universe.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/memory/universe.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -885,6 +885,8 @@
// the actual alignment depends on its size.
Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
+ assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
+ "heap size is too big for compressed oops");
char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
--- a/hotspot/src/share/vm/oops/constMethod.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/constMethod.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -55,11 +55,24 @@
set_stackmap_data(NULL);
set_code_size(byte_code_size);
set_constMethod_size(size);
- set_inlined_tables_length(sizes);
+ set_inlined_tables_length(sizes); // sets _flags
set_method_type(method_type);
assert(this->size() == size, "wrong size for object");
+ set_name_index(0);
+ set_signature_index(0);
+ set_constants(NULL);
+ set_max_stack(0);
+ set_max_locals(0);
+ set_method_idnum(0);
+ set_size_of_parameters(0);
}
+// Accessor that copies to metadata.
+void ConstMethod::copy_stackmap_data(ClassLoaderData* loader_data,
+ u1* sd, int length, TRAPS) {
+ _stackmap_data = MetadataFactory::new_array<u1>(loader_data, length, CHECK);
+ memcpy((void*)_stackmap_data->adr_at(0), (void*)sd, length);
+}
// Deallocate metadata fields associated with ConstMethod*
void ConstMethod::deallocate_contents(ClassLoaderData* loader_data) {
--- a/hotspot/src/share/vm/oops/constMethod.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/constMethod.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -280,6 +280,7 @@
// stackmap table data
Array<u1>* stackmap_data() const { return _stackmap_data; }
void set_stackmap_data(Array<u1>* sd) { _stackmap_data = sd; }
+ void copy_stackmap_data(ClassLoaderData* loader_data, u1* sd, int length, TRAPS);
bool has_stackmap_table() const { return _stackmap_data != NULL; }
void init_fingerprint() {
--- a/hotspot/src/share/vm/oops/cpCache.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/cpCache.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -44,6 +44,8 @@
void ConstantPoolCacheEntry::initialize_entry(int index) {
assert(0 < index && index < 0x10000, "sanity check");
_indices = index;
+ _f1 = NULL;
+ _f2 = _flags = 0;
assert(constant_pool_index() == index, "");
}
@@ -533,13 +535,17 @@
// Implementation of ConstantPoolCache
-ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
+ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
+ int length,
+ const intStack& index_map,
+ const intStack& invokedynamic_map, TRAPS) {
int size = ConstantPoolCache::size(length);
- return new (loader_data, size, false, THREAD) ConstantPoolCache(length);
+ return new (loader_data, size, false, THREAD) ConstantPoolCache(length, index_map, invokedynamic_map);
}
-void ConstantPoolCache::initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map) {
+void ConstantPoolCache::initialize(const intArray& inverse_index_map,
+ const intArray& invokedynamic_references_map) {
assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
for (int i = 0; i < length(); i++) {
ConstantPoolCacheEntry* e = entry_at(i);
--- a/hotspot/src/share/vm/oops/cpCache.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/cpCache.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -377,14 +377,21 @@
debug_only(friend class ClassVerifier;)
// Constructor
- ConstantPoolCache(int length) : _length(length), _constant_pool(NULL) {
+ ConstantPoolCache(int length, const intStack& inverse_index_map,
+ const intStack& invokedynamic_references_map) :
+ _length(length), _constant_pool(NULL) {
+ initialize(inverse_index_map, invokedynamic_references_map);
for (int i = 0; i < length; i++) {
assert(entry_at(i)->is_f1_null(), "Failed to clear?");
}
}
+ // Initialization
+ void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map);
public:
- static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length, TRAPS);
+ static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length,
+ const intStack& inverse_index_map,
+ const intStack& invokedynamic_references_map, TRAPS);
bool is_constantPoolCache() const { return true; }
int length() const { return _length; }
@@ -405,9 +412,6 @@
friend class ConstantPoolCacheEntry;
public:
- // Initialization
- void initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map);
-
// Accessors
void set_constant_pool(ConstantPool* pool) { _constant_pool = pool; }
ConstantPool* constant_pool() const { return _constant_pool; }
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -165,7 +165,8 @@
volatile int InstanceKlass::_total_instanceKlass_count = 0;
-Klass* InstanceKlass::allocate_instance_klass(ClassLoaderData* loader_data,
+InstanceKlass* InstanceKlass::allocate_instance_klass(
+ ClassLoaderData* loader_data,
int vtable_len,
int itable_len,
int static_field_size,
@@ -207,10 +208,35 @@
access_flags, is_anonymous);
}
+ // Check for pending exception before adding to the loader data and incrementing
+ // class count. Can get OOM here.
+ if (HAS_PENDING_EXCEPTION) {
+ return NULL;
+ }
+
+ // Add all classes to our internal class loader list here,
+ // including classes in the bootstrap (NULL) class loader.
+ loader_data->add_class(ik);
+
Atomic::inc(&_total_instanceKlass_count);
return ik;
}
+
+// copy method ordering from resource area to Metaspace
+void InstanceKlass::copy_method_ordering(intArray* m, TRAPS) {
+ if (m != NULL) {
+ // allocate a new array and copy contents (memcpy?)
+ _method_ordering = MetadataFactory::new_array<int>(class_loader_data(), m->length(), CHECK);
+ for (int i = 0; i < m->length(); i++) {
+ _method_ordering->at_put(i, m->at(i));
+ }
+ } else {
+ _method_ordering = Universe::the_empty_int_array();
+ }
+}
+
+
InstanceKlass::InstanceKlass(int vtable_len,
int itable_len,
int static_field_size,
@@ -220,72 +246,116 @@
bool is_anonymous) {
No_Safepoint_Verifier no_safepoint; // until k becomes parsable
- int size = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
- access_flags.is_interface(), is_anonymous);
-
- // The sizes of these these three variables are used for determining the
- // size of the instanceKlassOop. It is critical that these are set to the right
- // sizes before the first GC, i.e., when we allocate the mirror.
- this->set_vtable_length(vtable_len);
- this->set_itable_length(itable_len);
- this->set_static_field_size(static_field_size);
- this->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
- this->set_access_flags(access_flags);
- this->set_is_anonymous(is_anonymous);
- assert(this->size() == size, "wrong size for object");
-
- this->set_array_klasses(NULL);
- this->set_methods(NULL);
- this->set_method_ordering(NULL);
- this->set_local_interfaces(NULL);
- this->set_transitive_interfaces(NULL);
- this->init_implementor();
- this->set_fields(NULL, 0);
- this->set_constants(NULL);
- this->set_class_loader_data(NULL);
- this->set_protection_domain(NULL);
- this->set_signers(NULL);
- this->set_source_file_name(NULL);
- this->set_source_debug_extension(NULL, 0);
- this->set_array_name(NULL);
- this->set_inner_classes(NULL);
- this->set_static_oop_field_count(0);
- this->set_nonstatic_field_size(0);
- this->set_is_marked_dependent(false);
- this->set_init_state(InstanceKlass::allocated);
- this->set_init_thread(NULL);
- this->set_init_lock(NULL);
- this->set_reference_type(rt);
- this->set_oop_map_cache(NULL);
- this->set_jni_ids(NULL);
- this->set_osr_nmethods_head(NULL);
- this->set_breakpoints(NULL);
- this->init_previous_versions();
- this->set_generic_signature(NULL);
- this->release_set_methods_jmethod_ids(NULL);
- this->release_set_methods_cached_itable_indices(NULL);
- this->set_annotations(NULL);
- this->set_jvmti_cached_class_field_map(NULL);
- this->set_initial_method_idnum(0);
+ int iksize = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
+ access_flags.is_interface(), is_anonymous);
+
+ set_vtable_length(vtable_len);
+ set_itable_length(itable_len);
+ set_static_field_size(static_field_size);
+ set_nonstatic_oop_map_size(nonstatic_oop_map_size);
+ set_access_flags(access_flags);
+ _misc_flags = 0; // initialize to zero
+ set_is_anonymous(is_anonymous);
+ assert(size() == iksize, "wrong size for object");
+
+ set_array_klasses(NULL);
+ set_methods(NULL);
+ set_method_ordering(NULL);
+ set_local_interfaces(NULL);
+ set_transitive_interfaces(NULL);
+ init_implementor();
+ set_fields(NULL, 0);
+ set_constants(NULL);
+ set_class_loader_data(NULL);
+ set_protection_domain(NULL);
+ set_signers(NULL);
+ set_source_file_name(NULL);
+ set_source_debug_extension(NULL, 0);
+ set_array_name(NULL);
+ set_inner_classes(NULL);
+ set_static_oop_field_count(0);
+ set_nonstatic_field_size(0);
+ set_is_marked_dependent(false);
+ set_init_state(InstanceKlass::allocated);
+ set_init_thread(NULL);
+ set_init_lock(NULL);
+ set_reference_type(rt);
+ set_oop_map_cache(NULL);
+ set_jni_ids(NULL);
+ set_osr_nmethods_head(NULL);
+ set_breakpoints(NULL);
+ init_previous_versions();
+ set_generic_signature(NULL);
+ release_set_methods_jmethod_ids(NULL);
+ release_set_methods_cached_itable_indices(NULL);
+ set_annotations(NULL);
+ set_jvmti_cached_class_field_map(NULL);
+ set_initial_method_idnum(0);
+ _dependencies = NULL;
+ set_jvmti_cached_class_field_map(NULL);
+ set_cached_class_file(NULL, 0);
+ set_initial_method_idnum(0);
+ set_minor_version(0);
+ set_major_version(0);
+ NOT_PRODUCT(_verify_count = 0;)
// initialize the non-header words to zero
intptr_t* p = (intptr_t*)this;
- for (int index = InstanceKlass::header_size(); index < size; index++) {
+ for (int index = InstanceKlass::header_size(); index < iksize; index++) {
p[index] = NULL_WORD;
}
// Set temporary value until parseClassFile updates it with the real instance
// size.
- this->set_layout_helper(Klass::instance_layout_helper(0, true));
+ set_layout_helper(Klass::instance_layout_helper(0, true));
+}
+
+
+void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
+ Array<Method*>* methods) {
+ if (methods != NULL && methods != Universe::the_empty_method_array()) {
+ for (int i = 0; i < methods->length(); i++) {
+ Method* method = methods->at(i);
+ if (method == NULL) continue; // may be NULL if an error occurred during parsing
+ // Only want to delete methods that are not executing for RedefineClasses.
+ // The previous version will point to them so they're not totally dangling
+ assert (!method->on_stack(), "shouldn't be called with methods on stack");
+ MetadataFactory::free_metadata(loader_data, method);
+ }
+ MetadataFactory::free_array<Method*>(loader_data, methods);
+ }
}
+void InstanceKlass::deallocate_interfaces(ClassLoaderData* loader_data,
+ Klass* super_klass,
+ Array<Klass*>* local_interfaces,
+ Array<Klass*>* transitive_interfaces) {
+ // Only deallocate transitive interfaces if not empty, same as super class
+ // or same as local interfaces. See code in parseClassFile.
+ Array<Klass*>* ti = transitive_interfaces;
+ if (ti != Universe::the_empty_klass_array() && ti != local_interfaces) {
+ // check that the interfaces don't come from super class
+ Array<Klass*>* sti = (super_klass == NULL) ? NULL :
+ InstanceKlass::cast(super_klass)->transitive_interfaces();
+ if (ti != sti) {
+ MetadataFactory::free_array<Klass*>(loader_data, ti);
+ }
+ }
+
+ // local interfaces can be empty
+ if (local_interfaces != Universe::the_empty_klass_array()) {
+ MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
+ }
+}
// This function deallocates the metadata and C heap pointers that the
// InstanceKlass points to.
void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
// Orphan the mirror first, CMS thinks it's still live.
- java_lang_Class::set_klass(java_mirror(), NULL);
+ if (java_mirror() != NULL) {
+ java_lang_Class::set_klass(java_mirror(), NULL);
+ }
// Need to take this class off the class loader data list.
loader_data->remove_class(this);
@@ -300,17 +370,7 @@
// reference counting symbol names.
release_C_heap_structures();
- Array<Method*>* ms = methods();
- if (ms != Universe::the_empty_method_array()) {
- for (int i = 0; i <= methods()->length() -1 ; i++) {
- Method* method = methods()->at(i);
- // Only want to delete methods that are not executing for RedefineClasses.
- // The previous version will point to them so they're not totally dangling
- assert (!method->on_stack(), "shouldn't be called with methods on stack");
- MetadataFactory::free_metadata(loader_data, method);
- }
- MetadataFactory::free_array<Method*>(loader_data, methods());
- }
+ deallocate_methods(loader_data, methods());
set_methods(NULL);
if (method_ordering() != Universe::the_empty_int_array()) {
@@ -327,24 +387,8 @@
}
set_secondary_supers(NULL);
- // Only deallocate transitive interfaces if not empty, same as super class
- // or same as local interfaces. See code in parseClassFile.
- Array<Klass*>* ti = transitive_interfaces();
- if (ti != Universe::the_empty_klass_array() && ti != local_interfaces()) {
- // check that the interfaces don't come from super class
- Array<Klass*>* sti = (super() == NULL) ? NULL :
- InstanceKlass::cast(super())->transitive_interfaces();
- if (ti != sti) {
- MetadataFactory::free_array<Klass*>(loader_data, ti);
- }
- }
+ deallocate_interfaces(loader_data, super(), local_interfaces(), transitive_interfaces());
set_transitive_interfaces(NULL);
-
- // local interfaces can be empty
- Array<Klass*>* li = local_interfaces();
- if (li != Universe::the_empty_klass_array()) {
- MetadataFactory::free_array<Klass*>(loader_data, li);
- }
set_local_interfaces(NULL);
MetadataFactory::free_array<jushort>(loader_data, fields());
@@ -352,9 +396,11 @@
// If a method from a redefined class is using this constant pool, don't
// delete it, yet. The new class's previous version will point to this.
- assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
- MetadataFactory::free_metadata(loader_data, constants());
- set_constants(NULL);
+ if (constants() != NULL) {
+ assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
+ MetadataFactory::free_metadata(loader_data, constants());
+ set_constants(NULL);
+ }
if (inner_classes() != Universe::the_empty_short_array()) {
MetadataFactory::free_array<jushort>(loader_data, inner_classes());
@@ -2785,7 +2831,7 @@
st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr();
st->print(BULLET"host class: "); host_klass()->print_value_on_maybe_null(st); st->cr();
st->print(BULLET"signers: "); signers()->print_value_on(st); st->cr();
- st->print(BULLET"init_lock: "); ((oop)init_lock())->print_value_on(st); st->cr();
+ st->print(BULLET"init_lock: "); ((oop)_init_lock)->print_value_on(st); st->cr();
if (source_file_name() != NULL) {
st->print(BULLET"source file: ");
source_file_name()->print_value_on(st);
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -147,7 +147,8 @@
AccessFlags access_flags,
bool is_anonymous);
public:
- static Klass* allocate_instance_klass(ClassLoaderData* loader_data,
+ static InstanceKlass* allocate_instance_klass(
+ ClassLoaderData* loader_data,
int vtable_len,
int itable_len,
int static_field_size,
@@ -266,8 +267,9 @@
u1 _init_state; // state of class
u1 _reference_type; // reference type
+ JvmtiCachedClassFieldMap* _jvmti_cached_class_field_map; // JVMTI: used during heap iteration
- JvmtiCachedClassFieldMap* _jvmti_cached_class_field_map; // JVMTI: used during heap iteration
+ NOT_PRODUCT(int _verify_count;) // to avoid redundant verifies
// Method array.
Array<Method*>* _methods;
@@ -356,16 +358,19 @@
// method ordering
Array<int>* method_ordering() const { return _method_ordering; }
void set_method_ordering(Array<int>* m) { _method_ordering = m; }
+ void copy_method_ordering(intArray* m, TRAPS);
// interfaces
Array<Klass*>* local_interfaces() const { return _local_interfaces; }
void set_local_interfaces(Array<Klass*>* a) {
guarantee(_local_interfaces == NULL || a == NULL, "Just checking");
_local_interfaces = a; }
+
Array<Klass*>* transitive_interfaces() const { return _transitive_interfaces; }
void set_transitive_interfaces(Array<Klass*>* a) {
guarantee(_transitive_interfaces == NULL || a == NULL, "Just checking");
- _transitive_interfaces = a; }
+ _transitive_interfaces = a;
+ }
private:
friend class fieldDescriptor;
@@ -381,10 +386,9 @@
int java_fields_count() const { return (int)_java_fields_count; }
Array<u2>* fields() const { return _fields; }
-
void set_fields(Array<u2>* f, u2 java_fields_count) {
guarantee(_fields == NULL || f == NULL, "Just checking");
- _fields = f;
+ _fields = f;
_java_fields_count = java_fields_count;
}
@@ -588,7 +592,7 @@
// symbol unloading support (refcount already added)
Symbol* array_name() { return _array_name; }
- void set_array_name(Symbol* name) { assert(_array_name == NULL, "name already created"); _array_name = name; }
+ void set_array_name(Symbol* name) { assert(_array_name == NULL || name == NULL, "name already created"); _array_name = name; }
// nonstatic oop-map blocks
static int nonstatic_oop_map_size(unsigned int oop_map_count) {
@@ -914,8 +918,15 @@
void clean_method_data(BoolObjectClosure* is_alive);
// Explicit metaspace deallocation of fields
- // For RedefineClasses, we need to deallocate instanceKlasses
+ // For RedefineClasses and class file parsing errors, we need to deallocate
+ // instanceKlasses and the metadata they point to.
void deallocate_contents(ClassLoaderData* loader_data);
+ static void deallocate_methods(ClassLoaderData* loader_data,
+ Array<Method*>* methods);
+ static void deallocate_interfaces(ClassLoaderData* loader_data,
+ Klass* super_klass,
+ Array<Klass*>* local_interfaces,
+ Array<Klass*>* transitive_interfaces);
// The constant pool is on stack if any of the methods are executing or
// referenced by handles.
--- a/hotspot/src/share/vm/oops/klass.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/klass.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -146,16 +146,16 @@
Klass::Klass() {
Klass* k = this;
- { // Preinitialize supertype information.
- // A later call to initialize_supers() may update these settings:
- set_super(NULL);
- for (juint i = 0; i < Klass::primary_super_limit(); i++) {
- _primary_supers[i] = NULL;
- }
- set_secondary_supers(NULL);
- _primary_supers[0] = k;
- set_super_check_offset(in_bytes(primary_supers_offset()));
+ // Preinitialize supertype information.
+ // A later call to initialize_supers() may update these settings:
+ set_super(NULL);
+ for (juint i = 0; i < Klass::primary_super_limit(); i++) {
+ _primary_supers[i] = NULL;
}
+ set_secondary_supers(NULL);
+ set_secondary_super_cache(NULL);
+ _primary_supers[0] = k;
+ set_super_check_offset(in_bytes(primary_supers_offset()));
set_java_mirror(NULL);
set_modifier_flags(0);
--- a/hotspot/src/share/vm/oops/klass.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/klass.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -79,7 +79,6 @@
// [last_biased_lock_bulk_revocation_time] (64 bits)
// [prototype_header]
// [biased_lock_revocation_count]
-// [verify_count ] - not in product
// [alloc_count ]
// [_modified_oops]
// [_accumulated_modified_oops]
@@ -172,10 +171,6 @@
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
-#ifndef PRODUCT
- int _verify_count; // to avoid redundant verifies
-#endif
-
juint _alloc_count; // allocation profiling support
TRACE_DEFINE_KLASS_TRACE_ID;
--- a/hotspot/src/share/vm/oops/method.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/method.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -77,22 +77,19 @@
return new (loader_data, size, false, THREAD) Method(cm, access_flags, size);
}
-Method::Method(ConstMethod* xconst,
- AccessFlags access_flags, int size) {
+Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
No_Safepoint_Verifier no_safepoint;
set_constMethod(xconst);
set_access_flags(access_flags);
set_method_size(size);
- set_name_index(0);
- set_signature_index(0);
#ifdef CC_INTERP
set_result_index(T_VOID);
#endif
- set_constants(NULL);
- set_max_stack(0);
- set_max_locals(0);
set_intrinsic_id(vmIntrinsics::_none);
set_jfr_towrite(false);
+ set_force_inline(false);
+ set_hidden(false);
+ set_dont_inline(false);
set_method_data(NULL);
set_interpreter_throwout_count(0);
set_vtable_index(Method::garbage_vtable_index);
--- a/hotspot/src/share/vm/oops/methodData.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/oops/methodData.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -652,23 +652,25 @@
// Set the method back-pointer.
_method = method();
- if (TieredCompilation) {
- _invocation_counter.init();
- _backedge_counter.init();
- _invocation_counter_start = 0;
- _backedge_counter_start = 0;
- _num_loops = 0;
- _num_blocks = 0;
- _highest_comp_level = 0;
- _highest_osr_comp_level = 0;
- _would_profile = true;
- }
+ _invocation_counter.init();
+ _backedge_counter.init();
+ _invocation_counter_start = 0;
+ _backedge_counter_start = 0;
+ _num_loops = 0;
+ _num_blocks = 0;
+ _highest_comp_level = 0;
+ _highest_osr_comp_level = 0;
+ _would_profile = true;
set_creation_mileage(mileage_of(method()));
// Initialize flags and trap history.
_nof_decompiles = 0;
_nof_overflow_recompiles = 0;
_nof_overflow_traps = 0;
+ _eflags = 0;
+ _arg_local = 0;
+ _arg_stack = 0;
+ _arg_returned = 0;
assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
Copy::zero_to_words((HeapWord*) &_trap_hist,
sizeof(_trap_hist) / sizeof(HeapWord));
@@ -677,6 +679,7 @@
// corresponding data cells.
int data_size = 0;
int empty_bc_count = 0; // number of bytecodes lacking data
+ _data[0] = 0; // apparently not set below.
BytecodeStream stream(method);
Bytecodes::Code c;
while ((c = stream.next()) >= 0) {
@@ -710,6 +713,7 @@
post_initialize(&stream);
set_size(object_size);
+
}
// Get a measure of how much mileage the method has on it.
--- a/hotspot/src/share/vm/opto/chaitin.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/opto/chaitin.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -187,31 +187,6 @@
#endif
};
-//------------------------------LRG_List---------------------------------------
-// Map Node indices to Live RanGe indices.
-// Array lookup in the optimized case.
-class LRG_List : public ResourceObj {
- friend class VMStructs;
- uint _cnt, _max;
- uint* _lidxs;
- ReallocMark _nesting; // assertion check for reallocations
-public:
- LRG_List( uint max );
-
- uint lookup( uint nidx ) const {
- return _lidxs[nidx];
- }
- uint operator[] (uint nidx) const { return lookup(nidx); }
-
- void map( uint nidx, uint lidx ) {
- assert( nidx < _cnt, "oob" );
- _lidxs[nidx] = lidx;
- }
- void extend( uint nidx, uint lidx );
-
- uint Size() const { return _cnt; }
-};
-
//------------------------------IFG--------------------------------------------
// InterFerence Graph
// An undirected graph implementation. Created with a fixed number of
--- a/hotspot/src/share/vm/opto/compile.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/opto/compile.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -892,7 +892,7 @@
: Phase(Compiler),
_env(ci_env),
_log(ci_env->log()),
- _compile_id(-1),
+ _compile_id(0),
_save_argument_registers(save_arg_registers),
_method(NULL),
_stub_name(stub_name),
--- a/hotspot/src/share/vm/opto/live.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/opto/live.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -33,11 +33,35 @@
#include "opto/regmask.hpp"
class Block;
-class LRG_List;
class PhaseCFG;
class VectorSet;
class IndexSet;
+//------------------------------LRG_List---------------------------------------
+// Map Node indices to Live RanGe indices.
+// Array lookup in the optimized case.
+class LRG_List : public ResourceObj {
+ friend class VMStructs;
+ uint _cnt, _max;
+ uint* _lidxs;
+ ReallocMark _nesting; // assertion check for reallocations
+public:
+ LRG_List( uint max );
+
+ uint lookup( uint nidx ) const {
+ return _lidxs[nidx];
+ }
+ uint operator[] (uint nidx) const { return lookup(nidx); }
+
+ void map( uint nidx, uint lidx ) {
+ assert( nidx < _cnt, "oob" );
+ _lidxs[nidx] = lidx;
+ }
+ void extend( uint nidx, uint lidx );
+
+ uint Size() const { return _cnt; }
+};
+
//------------------------------PhaseLive--------------------------------------
// Compute live-in/live-out
class PhaseLive : public Phase {
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -890,7 +890,7 @@
tty->print("Suspended Threads: [");
for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
-#if JVMTI_TRACE
+#ifdef JVMTI_TRACE
const char *name = JvmtiTrace::safe_get_thread_name(thread);
#else
const char *name = "";
--- a/hotspot/src/share/vm/prims/jvmtiTrace.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/prims/jvmtiTrace.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -43,10 +43,10 @@
// Support tracing except in product build on the client compiler
#ifndef PRODUCT
-#define JVMTI_TRACE 1
+#define JVMTI_TRACE
#else
#ifdef COMPILER2
-#define JVMTI_TRACE 1
+#define JVMTI_TRACE
#endif
#endif
--- a/hotspot/src/share/vm/runtime/arguments.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -1381,6 +1381,40 @@
return false;
}
+void Arguments::set_use_compressed_oops() {
+#ifndef ZERO
+#ifdef _LP64
+ // MaxHeapSize is not set up properly at this point, but
+ // the only value that can override MaxHeapSize if we are
+ // to use UseCompressedOops is InitialHeapSize.
+ size_t max_heap_size = MAX2(MaxHeapSize, InitialHeapSize);
+
+ if (max_heap_size <= max_heap_for_compressed_oops()) {
+#if !defined(COMPILER1) || defined(TIERED)
+ if (FLAG_IS_DEFAULT(UseCompressedOops)) {
+ FLAG_SET_ERGO(bool, UseCompressedOops, true);
+ }
+#endif
+#ifdef _WIN64
+ if (UseLargePages && UseCompressedOops) {
+ // Cannot allocate guard pages for implicit checks in indexed addressing
+ // mode, when large pages are specified on windows.
+ // This flag could be switched ON if narrow oop base address is set to 0,
+ // see code in Universe::initialize_heap().
+ Universe::set_narrow_oop_use_implicit_null_checks(false);
+ }
+#endif // _WIN64
+ } else {
+ if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
+ warning("Max heap size too large for Compressed Oops");
+ FLAG_SET_DEFAULT(UseCompressedOops, false);
+ FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+ }
+ }
+#endif // _LP64
+#endif // ZERO
+}
+
void Arguments::set_ergonomics_flags() {
if (os::is_server_class_machine()) {
@@ -1410,30 +1444,7 @@
#ifndef ZERO
#ifdef _LP64
- // Check that UseCompressedOops can be set with the max heap size allocated
- // by ergonomics.
- if (MaxHeapSize <= max_heap_for_compressed_oops()) {
-#if !defined(COMPILER1) || defined(TIERED)
- if (FLAG_IS_DEFAULT(UseCompressedOops)) {
- FLAG_SET_ERGO(bool, UseCompressedOops, true);
- }
-#endif
-#ifdef _WIN64
- if (UseLargePages && UseCompressedOops) {
- // Cannot allocate guard pages for implicit checks in indexed addressing
- // mode, when large pages are specified on windows.
- // This flag could be switched ON if narrow oop base address is set to 0,
- // see code in Universe::initialize_heap().
- Universe::set_narrow_oop_use_implicit_null_checks(false);
- }
-#endif // _WIN64
- } else {
- if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
- warning("Max heap size too large for Compressed Oops");
- FLAG_SET_DEFAULT(UseCompressedOops, false);
- FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
- }
- }
+ set_use_compressed_oops();
// UseCompressedOops must be on for UseCompressedKlassPointers to be on.
if (!UseCompressedOops) {
if (UseCompressedKlassPointers) {
@@ -1813,6 +1824,13 @@
}
}
+void Arguments::check_deprecated_gc_flags() {
+ if (FLAG_IS_CMDLINE(MaxGCMinorPauseMillis)) {
+ warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
+ "and will likely be removed in future release");
+ }
+}
+
// Check stack pages settings
bool Arguments::check_stack_pages()
{
@@ -2273,10 +2291,12 @@
}
#if !INCLUDE_JVMTI
if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
- warning("profiling and debugging agents are not supported in this VM");
- } else
+ jio_fprintf(defaultStream::error_stream(),
+ "Profiling and debugging agents are not supported in this VM\n");
+ return JNI_ERR;
+ }
#endif // !INCLUDE_JVMTI
- add_init_library(name, options);
+ add_init_library(name, options);
}
// -agentlib and -agentpath
} else if (match_option(option, "-agentlib:", &tail) ||
@@ -2293,16 +2313,19 @@
}
#if !INCLUDE_JVMTI
if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
- warning("profiling and debugging agents are not supported in this VM");
- } else
+ jio_fprintf(defaultStream::error_stream(),
+ "Profiling and debugging agents are not supported in this VM\n");
+ return JNI_ERR;
+ }
#endif // !INCLUDE_JVMTI
add_init_agent(name, options, is_absolute_path);
-
}
// -javaagent
} else if (match_option(option, "-javaagent:", &tail)) {
#if !INCLUDE_JVMTI
- warning("Instrumentation agents are not supported in this VM");
+ jio_fprintf(defaultStream::error_stream(),
+ "Instrumentation agents are not supported in this VM\n");
+ return JNI_ERR;
#else
if(tail != NULL) {
char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1, mtInternal), tail);
@@ -2443,8 +2466,9 @@
#if INCLUDE_FPROF
_has_profile = true;
#else // INCLUDE_FPROF
- // do we have to exit?
- warning("Flat profiling is not supported in this VM.");
+ jio_fprintf(defaultStream::error_stream(),
+ "Flat profiling is not supported in this VM.\n");
+ return JNI_ERR;
#endif // INCLUDE_FPROF
// -Xaprof
} else if (match_option(option, "-Xaprof", &tail)) {
@@ -2478,8 +2502,9 @@
#if INCLUDE_MANAGEMENT
FLAG_SET_CMDLINE(bool, ManagementServer, true);
#else
- vm_exit_during_initialization(
- "-Dcom.sun.management is not supported in this VM.", NULL);
+ jio_fprintf(defaultStream::output_stream(),
+ "-Dcom.sun.management is not supported in this VM.\n");
+ return JNI_ERR;
#endif
}
// -Xint
@@ -2492,16 +2517,10 @@
} else if (match_option(option, "-Xcomp", &tail)) {
// for testing the compiler; turn off all flags that inhibit compilation
set_mode_flags(_comp);
-
// -Xshare:dump
} else if (match_option(option, "-Xshare:dump", &tail)) {
-#if !INCLUDE_CDS
- vm_exit_during_initialization(
- "Dumping a shared archive is not supported in this VM.", NULL);
-#else
FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
set_mode_flags(_int); // Prevent compilation, which creates objects
-#endif
// -Xshare:on
} else if (match_option(option, "-Xshare:on", &tail)) {
FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
@@ -2514,7 +2533,6 @@
} else if (match_option(option, "-Xshare:off", &tail)) {
FLAG_SET_CMDLINE(bool, UseSharedSpaces, false);
FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false);
-
// -Xverify
} else if (match_option(option, "-Xverify", &tail)) {
if (strcmp(tail, ":all") == 0 || strcmp(tail, "") == 0) {
@@ -2828,8 +2846,9 @@
FLAG_SET_CMDLINE(bool, UseVMInterruptibleIO, true);
#if !INCLUDE_MANAGEMENT
} else if (match_option(option, "-XX:+ManagementServer", &tail)) {
- vm_exit_during_initialization(
- "ManagementServer is not supported in this VM.", NULL);
+ jio_fprintf(defaultStream::error_stream(),
+ "ManagementServer is not supported in this VM.\n");
+ return JNI_ERR;
#endif // INCLUDE_MANAGEMENT
} else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
// Skip -XX:Flags= since that case has already been handled
@@ -3135,7 +3154,9 @@
#if INCLUDE_NMT
MemTracker::init_tracking_options(tail);
#else
- warning("Native Memory Tracking is not supported in this VM");
+ jio_fprintf(defaultStream::error_stream(),
+ "Native Memory Tracking is not supported in this VM\n");
+ return JNI_ERR;
#endif
}
@@ -3254,6 +3275,16 @@
force_serial_gc();
#endif // INCLUDE_ALL_GCS
#if !INCLUDE_CDS
+ if (DumpSharedSpaces || RequireSharedSpaces) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Shared spaces are not supported in this VM\n");
+ return JNI_ERR;
+ }
+ if ((UseSharedSpaces && FLAG_IS_CMDLINE(UseSharedSpaces)) || PrintSharedSpaces) {
+ warning("Shared spaces are not supported in this VM");
+ FLAG_SET_DEFAULT(UseSharedSpaces, false);
+ FLAG_SET_DEFAULT(PrintSharedSpaces, false);
+ }
no_shared_spaces();
#endif // INCLUDE_CDS
@@ -3292,6 +3323,7 @@
set_g1_gc_flags();
}
check_deprecated_gcs();
+ check_deprecated_gc_flags();
#else // INCLUDE_ALL_GCS
assert(verify_serial_gc_flags(), "SerialGC unset");
#endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/runtime/arguments.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -309,6 +309,7 @@
// Garbage-First (UseG1GC)
static void set_g1_gc_flags();
// GC ergonomics
+ static void set_use_compressed_oops();
static void set_ergonomics_flags();
static void set_shared_spaces_flags();
// Setup heap size
@@ -414,6 +415,7 @@
// Check for consistency in the selection of the garbage collector.
static bool check_gc_consistency();
static void check_deprecated_gcs();
+ static void check_deprecated_gc_flags();
// Check consistency or otherwise of VM argument settings
static bool check_vm_args_consistency();
// Check stack pages settings
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -425,6 +425,7 @@
callee_parameters,
callee_locals,
index == 0,
+ index == array->frames() - 1,
popframe_extra_args);
// This pc doesn't have to be perfect just good enough to identify the frame
// as interpreted so the skeleton frame will be walkable
--- a/hotspot/src/share/vm/runtime/globals.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -3644,7 +3644,10 @@
"Enable internal testing APIs") \
\
product(bool, PrintGCCause, true, \
- "Include GC cause in GC logging")
+ "Include GC cause in GC logging") \
+ \
+ product(bool, AllowNonVirtualCalls, false, \
+ "Obey the ACC_SUPER flag and allow invokenonvirtual calls")
/*
* Macros for factoring of globals
--- a/hotspot/src/share/vm/runtime/thread.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/thread.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1289,6 +1289,7 @@
void enable_stack_red_zone();
void disable_stack_red_zone();
+ inline bool stack_guard_zone_unused();
inline bool stack_yellow_zone_disabled();
inline bool stack_yellow_zone_enabled();
@@ -1759,6 +1760,10 @@
return (CompilerThread*)this;
}
+inline bool JavaThread::stack_guard_zone_unused() {
+ return _stack_guard_state == stack_guard_unused;
+}
+
inline bool JavaThread::stack_yellow_zone_disabled() {
return _stack_guard_state == stack_guard_yellow_disabled;
}
--- a/hotspot/src/share/vm/runtime/vframeArray.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/vframeArray.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -160,6 +160,7 @@
int callee_locals,
frame* caller,
bool is_top_frame,
+ bool is_bottom_frame,
int exec_mode) {
JavaThread* thread = (JavaThread*) Thread::current();
@@ -275,7 +276,8 @@
callee_locals,
caller,
iframe(),
- is_top_frame);
+ is_top_frame,
+ is_bottom_frame);
// Update the pc in the frame object and overwrite the temporary pc
// we placed in the skeletal frame now that we finally know the
@@ -420,6 +422,7 @@
int callee_parameters,
int callee_locals,
bool is_top_frame,
+ bool is_bottom_frame,
int popframe_extra_stack_expression_els) const {
assert(method()->max_locals() == locals()->size(), "just checking");
int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
@@ -431,7 +434,8 @@
caller_actual_parameters,
callee_parameters,
callee_locals,
- is_top_frame);
+ is_top_frame,
+ is_bottom_frame);
}
@@ -522,7 +526,7 @@
// Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
// Unpack the frames from the oldest (frames() -1) to the youngest (0)
- frame caller_frame = me;
+ frame* caller_frame = &me;
for (index = frames() - 1; index >= 0 ; index--) {
vframeArrayElement* elem = element(index); // caller
int callee_parameters, callee_locals;
@@ -542,13 +546,14 @@
elem->unpack_on_stack(caller_actual_parameters,
callee_parameters,
callee_locals,
- &caller_frame,
+ caller_frame,
index == 0,
+ index == frames() - 1,
exec_mode);
if (index == frames() - 1) {
Deoptimization::unwind_callee_save_values(elem->iframe(), this);
}
- caller_frame = *elem->iframe();
+ caller_frame = elem->iframe();
caller_actual_parameters = callee_parameters;
}
deallocate_monitor_chunks();
--- a/hotspot/src/share/vm/runtime/vframeArray.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/vframeArray.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -88,6 +88,7 @@
int on_stack_size(int caller_actual_parameters,
int callee_parameters,
int callee_locals,
bool is_top_frame,
+ bool is_bottom_frame,
int popframe_extra_stack_expression_els) const;
@@ -97,6 +98,7 @@
int callee_locals,
frame* caller,
bool is_top_frame,
+ bool is_bottom_frame,
int exec_mode);
#ifndef PRODUCT
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -336,7 +336,6 @@
nonstatic_field(Klass, _access_flags, AccessFlags) \
nonstatic_field(Klass, _subklass, Klass*) \
nonstatic_field(Klass, _next_sibling, Klass*) \
- nonproduct_nonstatic_field(Klass, _verify_count, int) \
nonstatic_field(Klass, _alloc_count, juint) \
nonstatic_field(MethodData, _size, int) \
nonstatic_field(MethodData, _method, Method*) \
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -94,6 +94,7 @@
template(ReportJavaOutOfMemory) \
template(JFRCheckpoint) \
template(Exit) \
+ template(LinuxDllLoad) \
class VM_Operation: public CHeapObj<mtInternal> {
public:
--- a/hotspot/src/share/vm/utilities/elfFile.cpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/utilities/elfFile.cpp Wed Jul 05 18:45:01 2017 +0200
@@ -197,4 +197,28 @@
return NULL;
}
+#ifdef LINUX
+bool ElfFile::specifies_noexecstack() {
+ Elf_Phdr phdr;
+ if (!m_file) return true;
+
+ if (!fseek(m_file, m_elfHdr.e_phoff, SEEK_SET)) {
+ for (int index = 0; index < m_elfHdr.e_phnum; index ++) {
+ if (fread((void*)&phdr, sizeof(Elf_Phdr), 1, m_file) != 1) {
+ m_status = NullDecoder::file_invalid;
+ return false;
+ }
+ if (phdr.p_type == PT_GNU_STACK) {
+ if (phdr.p_flags == (PF_R | PF_W)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+ }
+ return false;
+}
+#endif
+
#endif // _WINDOWS
--- a/hotspot/src/share/vm/utilities/elfFile.hpp Mon Mar 18 10:46:49 2013 -0400
+++ b/hotspot/src/share/vm/utilities/elfFile.hpp Wed Jul 05 18:45:01 2017 +0200
@@ -43,6 +43,7 @@
typedef Elf64_Ehdr Elf_Ehdr;
typedef Elf64_Shdr Elf_Shdr;
+typedef Elf64_Phdr Elf_Phdr;
typedef Elf64_Sym Elf_Sym;
#if !defined(_ALLBSD_SOURCE) || defined(__APPLE__)
@@ -59,6 +60,7 @@
typedef Elf32_Ehdr Elf_Ehdr;
typedef Elf32_Shdr Elf_Shdr;
+typedef Elf32_Phdr Elf_Phdr;
typedef Elf32_Sym Elf_Sym;
#if !defined(_ALLBSD_SOURCE) || defined(__APPLE__)
@@ -123,6 +125,14 @@
ElfFile* next() const { return m_next; }
void set_next(ElfFile* file) { m_next = file; }
+ public:
+ // Returns true if the ELF file is marked NOT to require an executable stack,
+ // or if the file could not be opened.
+ // Returns false if the ELF file requires an executable stack, if the stack flag
+ // is not set at all, or if the file cannot be read.
+ // On systems other than Linux this always returns false.
+ bool specifies_noexecstack() NOT_LINUX({ return false; });
+
protected:
ElfFile* m_next;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/8009761/Test8009761.java Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8009761
+ * @summary Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
+ *
+ */
+
+public class Test8009761 {
+
+ static class UnloadedClass {
+ volatile int i;
+ }
+
+ static Object m1(boolean deopt) {
+ // When running interpreted, on sparc, the caller's stack is
+ // extended for the locals and the caller's frame is restored
+ // on return.
+ long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+ l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+ l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+ l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+ l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+ l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+ l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+ l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+ l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+ l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+ l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+ l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+ l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+ l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+ l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+ l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+ l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+ l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+ l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+ l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+ l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+ l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+ l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+ l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+ l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+ l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+ l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+ l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+ l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+ l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+ l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+ l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+ l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+ l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+ l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+ l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+ l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+ l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+ l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+ l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+ l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+ l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+ l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+ l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+ l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+ l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+ l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+ l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+ l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+ l508, l509, l510, l511;
+
+ long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
+ ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
+ ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
+ ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
+ ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
+ ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
+ ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
+ ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
+ ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
+ ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
+ ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
+ ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
+ ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
+ ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
+ ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
+ ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
+ ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
+ ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
+ ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
+ ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
+ ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
+ ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
+ ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
+ ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
+ ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
+ ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
+ ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
+ ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
+ ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
+ ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
+ ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
+ ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
+ ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
+ ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
+ ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
+ ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
+ ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
+ ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
+ ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
+ ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
+ ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
+ ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
+ ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
+ ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
+ ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
+ ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
+ ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
+ ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
+ ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
+ ll508, ll509, ll510, ll511;
+
+ if (deopt) {
+ UnloadedClass res = new UnloadedClass(); // sufficient to force deopt with c2 but not c1
+ res.i = 0; // forces deopt with c1
+ return res;
+ }
+ return null;
+ }
+
+ static int count = 0;
+
+ static void m2() {
+ // Called recursively until a StackOverflowError is thrown.
+ // The large number of locals makes each stack frame big, so
+ // the method is not invoked enough times to trigger
+ // compilation.
+
+ long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+ l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+ l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+ l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+ l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+ l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+ l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+ l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+ l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+ l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+ l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+ l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+ l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+ l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+ l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+ l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+ l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+ l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+ l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+ l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+ l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+ l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+ l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+ l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+ l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+ l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+ l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+ l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+ l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+ l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+ l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+ l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+ l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+ l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+ l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+ l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+ l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+ l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+ l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+ l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+ l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+ l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+ l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+ l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+ l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+ l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+ l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+ l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+ l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+ l508, l509, l510, l511;
+
+ count++;
+ m2();
+ }
+
+ static Object m3(boolean overflow_stack, boolean deopt) {
+ if (overflow_stack) {
+ m2();
+ return null;
+ }
+ Object o = m1(deopt);
+ if (deopt) {
+ m2();
+ }
+ return o;
+ }
+
+ public static void main(String[] args) {
+ int c1;
+ // Call m2 from m3 recursively until stack overflow. Count the number of recursive calls.
+ try {
+ m3(true, false);
+ } catch(StackOverflowError soe) {
+ }
+ c1 = count;
+ // Force compilation of m3(), which will inline m1()
+ for (int i = 0; i < 20000; i++) {
+ m3(false, false);
+ }
+ count = 0;
+ // Force deoptimization of m3() in m1(), then return from m1()
+ // to m3() and call m2() recursively. If deoptimization correctly
+ // rebuilt the interpreter stack for m3()/m1(), we should be
+ // able to call m2() recursively as many times as before.
+ try {
+ m3(false, true);
+ } catch(StackOverflowError soe) {
+ }
+ if (c1 != count) {
+ System.out.println("Failed: init recursive calls: " + c1 + ". After deopt " + count);
+ System.exit(97);
+ } else {
+ System.out.println("PASSED");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/7107135/Test.java Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2002-2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+class Test {
+
+ static boolean loadLib(String libName){
+ try {
+ System.loadLibrary(libName);
+ System.out.println("Loaded library "+ libName + ".");
+ return true;
+ } catch (SecurityException e) {
+ System.out.println("loadLibrary(\"" + libName + "\") throws: " + e + "\n");
+ } catch (UnsatisfiedLinkError e) {
+ System.out.println("loadLibrary(\"" + libName + "\") throws: " + e + "\n");
+ }
+ return false;
+ }
+
+ public static int counter = 1;
+
+ static int Runner() {
+ counter = counter * -1; // flip the sign so the value cannot be constant-folded
+ int i = counter;
+ if (counter < 2) counter += Runner(); // always true: recurse until StackOverflowError
+ return i;
+ }
+
+ public static int run() {
+ try{
+ Runner();
+ } catch (StackOverflowError e) {
+ System.out.println("Caught stack overflow error.");
+ return 0;
+ } catch (OutOfMemoryError e) {
+ return 0;
+ }
+ return 2;
+ }
+
+ public static void main(String argv[]) {
+ loadLib(argv[0]);
+ System.exit(run());
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/7107135/Test7107135.sh Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,98 @@
+#!/bin/sh
+
+#
+# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011 SAP AG. All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+##
+## @test Test7107135.sh
+## @bug 7107135
+## @summary Stack guard pages lost after loading library with executable stack.
+## @run shell Test7107135.sh
+##
+
+if [ "${TESTSRC}" = "" ]
+then TESTSRC=.
+fi
+
+if [ "${TESTJAVA}" = "" ]
+then
+ PARENT=`dirname \`which java\``
+ TESTJAVA=`dirname ${PARENT}`
+ echo "TESTJAVA not set, selecting " ${TESTJAVA}
+ echo "If this is incorrect, try setting the variable manually."
+fi
+
+BIT_FLAG=""
+
+# set platform-dependent variables
+OS=`uname -s`
+case "$OS" in
+ Linux)
+ NULL=/dev/null
+ PS=":"
+ FS="/"
+ ;;
+ *)
+ NULL=NUL
+ PS=";"
+ FS="\\"
+ echo "Test passed; only valid for Linux"
+ exit 0;
+ ;;
+esac
+
+ARCH=`uname -m`
+
+THIS_DIR=`pwd`
+
+cp ${TESTSRC}${FS}*.java ${THIS_DIR}
+${TESTJAVA}${FS}bin${FS}javac *.java
+
+gcc -fPIC -shared -c -o test.o -I${TESTJAVA}${FS}include -I${TESTJAVA}${FS}include${FS}linux ${TESTSRC}${FS}test.c
+ld -shared -z execstack -o libtest-rwx.so test.o
+ld -shared -z noexecstack -o libtest-rw.so test.o
+
+
+LD_LIBRARY_PATH=${THIS_DIR}
+echo LD_LIBRARY_PATH = ${LD_LIBRARY_PATH}
+export LD_LIBRARY_PATH
+
+# This should not fail.
+echo Check test program. Expected to pass:
+echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rw
+${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rw
+
+echo
+echo Test changing of stack protection:
+echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rwx
+${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rwx
+
+if [ "$?" == "0" ]
+then
+ echo
+ echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} TestMT test-rwx
+ ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} TestMT test-rwx
+fi
+
+exit $?
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/7107135/TestMT.java Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2002-2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+class TestMT {
+
+ static boolean loadLib(String libName) {
+ try {
+ System.loadLibrary(libName);
+ System.out.println("Loaded library "+ libName + ".");
+ return true;
+ } catch (SecurityException e) {
+ System.out.println("loadLibrary(\"" + libName + "\") throws: " + e + "\n");
+ } catch (UnsatisfiedLinkError e) {
+ System.out.println("loadLibrary(\"" + libName + "\") throws: " + e + "\n");
+ }
+ return false;
+ }
+
+ public static int counter = 1;
+ static int Runner() {
+ counter = counter * -1;
+ int i = counter;
+ if (counter < 2) counter += Runner();
+ return i;
+ }
+
+ public static int run(String msg) {
+ try {
+ Runner();
+ } catch (StackOverflowError e) {
+ System.out.println(msg + " caught stack overflow error.");
+ return 0;
+ } catch (OutOfMemoryError e) {
+ return 0;
+ }
+ return 2;
+ }
+
+ public static void main(String argv[]) {
+ try {
+ for (int i = 0; i < 20; i++) {
+ Thread t = new DoStackOverflow("SpawnedThread " + i);
+ t.start();
+ }
+ run("Main thread");
+ loadLib("test-rwx");
+ run("Main thread");
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ }
+
+ static class DoStackOverflow extends Thread {
+ public DoStackOverflow(String name) {
+ super(name);
+ }
+ public void run() {
+ for (int i = 0; i < 10; ++i) {
+ TestMT.run(getName());
+ yield();
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/7107135/test.c Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002-2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 SAP AG. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "jni.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+JNIEXPORT jint JNICALL Java_Test_someMethod(JNIEnv *env, jobject mainObject) {
+ return 3;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/8003985/Test8003985.java Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.lang.Class;
+import java.lang.String;
+import java.lang.System;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CyclicBarrier;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import sun.misc.Unsafe;
+import sun.misc.Contended;
+
+/*
+ * @test
+ * @bug 8003985
+ * @summary Support Contended Annotation - JEP 142
+ *
+ * @run main/othervm -XX:-RestrictContended Test8003985
+ */
+public class Test8003985 {
+
+ private static final Unsafe U;
+ private static int ADDRESS_SIZE;
+ private static int HEADER_SIZE;
+
+ static {
+ // obtain the Unsafe instance via reflection
+ try {
+ Field unsafe = Unsafe.class.getDeclaredField("theUnsafe");
+ unsafe.setAccessible(true);
+ U = (Unsafe) unsafe.get(null);
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new IllegalStateException(e);
+ }
+
+ // When running with CompressedOops on 64-bit platform, the address size
+ // reported by Unsafe is still 8, while the real reference fields are 4 bytes long.
+ // Try to guess the reference field size with this naive trick.
+ try {
+ long off1 = U.objectFieldOffset(CompressedOopsClass.class.getField("obj1"));
+ long off2 = U.objectFieldOffset(CompressedOopsClass.class.getField("obj2"));
+ ADDRESS_SIZE = (int) Math.abs(off2 - off1);
+ HEADER_SIZE = (int) Math.min(off1, off2);
+ } catch (NoSuchFieldException e) {
+ ADDRESS_SIZE = -1;
+ }
+ }
+
+ static class CompressedOopsClass {
+ public Object obj1;
+ public Object obj2;
+ }
+
+ public static boolean arePaddedPairwise(Class klass, String field1, String field2) throws Exception {
+ Field f1 = klass.getDeclaredField(field1);
+ Field f2 = klass.getDeclaredField(field2);
+
+ if (isStatic(f1) != isStatic(f2)) {
+ return true; // static and instance fields live in naturally disjoint locations
+ }
+
+ int diff = offset(f1) - offset(f2);
+ if (diff < 0) {
+ // f1 is first
+ return (offset(f2) - (offset(f1) + getSize(f1))) > 64;
+ } else {
+ // f2 is first
+ return (offset(f1) - (offset(f2) + getSize(f2))) > 64;
+ }
+ }
+
+ public static boolean isPadded(Class klass, String field1) throws Exception {
+ Field f1 = klass.getDeclaredField(field1);
+
+ if (isStatic(f1)) {
+ return offset(f1) > 128 + 64;
+ }
+
+ return offset(f1) > 64;
+ }
+
+ public static boolean sameLayout(Class klass1, Class klass2) throws Exception {
+ for (Field f1 : klass1.getDeclaredFields()) {
+ Field f2 = klass2.getDeclaredField(f1.getName());
+ if (offset(f1) != offset(f2)) {
+ return false;
+ }
+ }
+
+ for (Field f2 : klass2.getDeclaredFields()) {
+ Field f1 = klass1.getDeclaredField(f2.getName());
+ if (offset(f1) != offset(f2)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public static boolean isStatic(Field field) {
+ return Modifier.isStatic(field.getModifiers());
+ }
+
+ public static int offset(Field field) {
+ if (isStatic(field)) {
+ return (int) U.staticFieldOffset(field);
+ } else {
+ return (int) U.objectFieldOffset(field);
+ }
+ }
+
+ public static int getSize(Field field) {
+ Class type = field.getType();
+ if (type == byte.class) { return 1; }
+ if (type == boolean.class) { return 1; }
+ if (type == short.class) { return 2; }
+ if (type == char.class) { return 2; }
+ if (type == int.class) { return 4; }
+ if (type == float.class) { return 4; }
+ if (type == long.class) { return 8; }
+ if (type == double.class) { return 8; }
+ return ADDRESS_SIZE;
+ }
+
+ public static void main(String[] args) throws Exception {
+ boolean endResult = true;
+
+ // --------------- INSTANCE FIELDS ---------------------
+
+ if (arePaddedPairwise(Test1.class, "int1", "int2") ||
+ isPadded(Test1.class, "int1") ||
+ isPadded(Test1.class, "int2")) {
+ System.err.println("Test1 failed");
+ endResult &= false;
+ }
+
+ if (!arePaddedPairwise(Test2.class, "int1", "int2") ||
+ !isPadded(Test2.class, "int1") ||
+ isPadded(Test2.class, "int2")) {
+ System.err.println("Test2 failed");
+ endResult &= false;
+ }
+
+ if (!arePaddedPairwise(Test3.class, "int1", "int2") ||
+ !isPadded(Test3.class, "int1") ||
+ !isPadded(Test3.class, "int2")) {
+ System.err.println("Test3 failed");
+ endResult &= false;
+ }
+
+ if (arePaddedPairwise(Test4.class, "int1", "int2") ||
+ !isPadded(Test4.class, "int1") ||
+ !isPadded(Test4.class, "int2")) {
+ System.err.println("Test4 failed");
+ endResult &= false;
+ }
+
+ if (!arePaddedPairwise(Test5.class, "int1", "int2") ||
+ !isPadded(Test5.class, "int1") ||
+ !isPadded(Test5.class, "int2")) {
+ System.err.println("Test5 failed");
+ endResult &= false;
+ }
+
+ if (!arePaddedPairwise(Test6.class, "int1", "int2") ||
+ !isPadded(Test6.class, "int1") ||
+ !isPadded(Test6.class, "int2")) {
+ System.err.println("Test6 failed");
+ endResult &= false;
+ }
+
+ if (arePaddedPairwise(Test7.class, "int1", "int2") ||
+ !isPadded(Test7.class, "int1") ||
+ !isPadded(Test7.class, "int2")) {
+ System.err.println("Test7 failed");
+ endResult &= false;
+ }
+
+ if (!arePaddedPairwise(Test8.class, "int1", "int2") ||
+ !isPadded(Test8.class, "int1") ||
+ !isPadded(Test8.class, "int2")) {
+ System.err.println("Test8 failed");
+ endResult &= false;
+ }
+
+ if (!arePaddedPairwise(Test9.class, "int1", "int2") ||
+ !isPadded(Test9.class, "int1") ||
+ !isPadded(Test9.class, "int2")) {
+ System.err.println("Test9 failed");
+ endResult &= false;
+ }
+
+ if (!sameLayout(Test4.class, Test7.class)) {
+ System.err.println("Test4 and Test7 have different layouts");
+ endResult &= false;
+ }
+
+ if (!sameLayout(Test5.class, Test6.class)) {
+ System.err.println("Test5 and Test6 have different layouts");
+ endResult &= false;
+ }
+
+ if (!sameLayout(Test8.class, Test9.class)) {
+ System.err.println("Test8 and Test9 have different layouts");
+ endResult &= false;
+ }
+
+ System.out.println(endResult ? "Test PASSES" : "Test FAILS");
+ if (!endResult) {
+ throw new Error("Test failed");
+ }
+ }
+
+ // ----------------------------------- INSTANCE FIELDS -----------------------------------------
+
+ // naturally packed
+ public static class Test1 {
+ private int int1;
+ private int int2;
+ }
+
+ // int1 is padded
+ public static class Test2 {
+ @Contended private int int1;
+ private int int2;
+ }
+
+ // both fields are padded
+ public static class Test3 {
+ @Contended private int int1;
+ @Contended private int int2;
+ }
+
+ // both fields are padded together as a single named group
+ public static class Test4 {
+ @Contended("sameGroup") private int int1;
+ @Contended("sameGroup") private int int2;
+ }
+
+ // fields are padded in disjoint groups
+ public static class Test5 {
+ @Contended("diffGroup1") private int int1;
+ @Contended("diffGroup2") private int int2;
+ }
+
+ // fields are padded in disjoint groups
+ public static class Test6 {
+ @Contended private int int1;
+ @Contended("diffGroup2") private int int2;
+ }
+
+ // the whole class is padded as a single group
+ @Contended
+ public static class Test7 {
+ private int int1;
+ private int int2;
+ }
+
+ // all fields are padded as a group, and one field is additionally padded on its own
+ @Contended
+ public static class Test8 {
+ @Contended private int int1;
+ private int int2;
+ }
+
+ // all fields are padded as a group, and one field is additionally padded on its own
+ @Contended
+ public static class Test9 {
+ @Contended("group") private int int1;
+ private int int2;
+ }
+
+}
+
--- a/make/Defs-internal.gmk Mon Mar 18 10:46:49 2013 -0400
+++ b/make/Defs-internal.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -100,6 +100,7 @@
ABS_JAXWS_TOPDIR:=$(call OptFullPath,"$(JAXWS_TOPDIR)")
ABS_JDK_TOPDIR:=$(call OptFullPath,"$(JDK_TOPDIR)")
ABS_HOTSPOT_TOPDIR:=$(call OptFullPath,"$(HOTSPOT_TOPDIR)")
+ABS_NASHORN_TOPDIR:=$(call OptFullPath,"$(NASHORN_TOPDIR)")
ABS_INSTALL_TOPDIR:=$(call OptFullPath,"$(INSTALL_TOPDIR)")
ABS_SPONSORS_TOPDIR:=$(call OptFullPath,"$(SPONSORS_TOPDIR)")
ABS_DEPLOY_TOPDIR:=$(call OptFullPath,"$(DEPLOY_TOPDIR)")
@@ -165,6 +166,15 @@
endif
endif
+NASHORN_SRC_AVAILABLE := $(call MkExists,$(NASHORN_TOPDIR)/make/Makefile)
+ifndef BUILD_NASHORN
+ ifdef ALT_NASHORN_DIST
+ BUILD_NASHORN := false
+ else
+ BUILD_NASHORN := $(NASHORN_SRC_AVAILABLE)
+ endif
+endif
+
DEPLOY_SRC_AVAILABLE := $(call MkExists,$(DEPLOY_TOPDIR)/make/Makefile)
ifndef BUILD_DEPLOY
BUILD_DEPLOY := $(DEPLOY_SRC_AVAILABLE)
@@ -308,6 +318,10 @@
JAXWS_OUTPUTDIR = $(ABS_OUTPUTDIR)/jaxws
ABS_JAXWS_DIST = $(JAXWS_OUTPUTDIR)/dist
endif
+ifndef ALT_NASHORN_DIST
+ NASHORN_OUTPUTDIR = $(ABS_OUTPUTDIR)/nashorn
+ ABS_NASHORN_DIST = $(NASHORN_OUTPUTDIR)/dist
+endif
# Common make arguments (supplied to all component builds)
COMMON_BUILD_ARGUMENTS = \
--- a/make/jdk-rules.gmk Mon Mar 18 10:46:49 2013 -0400
+++ b/make/jdk-rules.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -62,6 +62,9 @@
ifeq ($(BUILD_JAXWS), true)
JDK_BUILD_ARGUMENTS += ALT_JAXWS_DIST=$(ABS_JAXWS_DIST)
endif
+ifeq ($(BUILD_NASHORN), true)
+ JDK_BUILD_ARGUMENTS += ALT_NASHORN_DIST=$(ABS_NASHORN_DIST)
+endif
ifeq ($(BUILD_HOTSPOT), true)
JDK_BUILD_ARGUMENTS += ALT_HOTSPOT_IMPORT_PATH=$(HOTSPOT_DIR)/import
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/make/nashorn-rules.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -0,0 +1,59 @@
+#
+# Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################
+# NASHORN TARGETS
+################################################################
+
+NASHORN_BUILD_ARGUMENTS = \
+ $(COMMON_BUILD_ARGUMENTS) \
+ ALT_OUTPUTDIR=$(NASHORN_OUTPUTDIR) \
+ ALT_BOOTDIR=$(BOOTDIR) \
+ ALT_JDK_IMPORT_PATH=$(JDK_IMPORT_PATH)
+
+ifeq ($(BUILD_LANGTOOLS), true)
+ NASHORN_BUILD_ARGUMENTS += ALT_LANGTOOLS_DIST=$(ABS_LANGTOOLS_DIST)
+endif
+
+# Default targets
+NASHORN = nashorn-build
+
+nashorn: nashorn-build
+nashorn-build:
+ $(MKDIR) -p $(NASHORN_OUTPUTDIR)
+ @$(call MakeStart,nashorn,all)
+ ($(CD) $(NASHORN_TOPDIR)/make && \
+ $(MAKE) $(NASHORN_BUILD_ARGUMENTS) all)
+ @$(call MakeFinish,nashorn,all)
+
+nashorn-clobber::
+ $(MKDIR) -p $(NASHORN_OUTPUTDIR)
+ @$(call MakeStart,nashorn,clobber)
+ ($(CD) $(NASHORN_TOPDIR)/make && \
+ $(MAKE) $(NASHORN_BUILD_ARGUMENTS) clobber)
+ @$(call MakeFinish,nashorn,clobber)
+
+.PHONY: nashorn nashorn-build nashorn-clobber
+
--- a/make/sanity-rules.gmk Mon Mar 18 10:46:49 2013 -0400
+++ b/make/sanity-rules.gmk Wed Jul 05 18:45:01 2017 +0200
@@ -182,6 +182,14 @@
"" >> $(WARNING_FILE)
endif
endif
+ifeq ($(NASHORN_SRC_AVAILABLE), true)
+ ifneq ($(BUILD_NASHORN), true)
+ @$(ECHO) "WARNING: You are not building the NASHORN sources.\n" \
+ " The nashorn files will be obtained from \n" \
+ " the location set in ALT_JDK_IMPORT_PATH. \n" \
+ "" >> $(WARNING_FILE)
+ endif
+endif
ifeq ($(DEPLOY_SRC_AVAILABLE), true)
ifneq ($(BUILD_DEPLOY), true)
@$(ECHO) "WARNING: You are not building the DEPLOY sources.\n" \
@@ -268,6 +276,9 @@
ifeq ($(JDK_SRC_AVAILABLE), true)
@$(ECHO) " JDK_TOPDIR = $(JDK_TOPDIR)" >> $(MESSAGE_FILE)
endif
+ifeq ($(NASHORN_SRC_AVAILABLE), true)
+ @$(ECHO) " NASHORN_TOPDIR = $(NASHORN_TOPDIR)" >> $(MESSAGE_FILE)
+endif
ifeq ($(DEPLOY_SRC_AVAILABLE), true)
@$(ECHO) " DEPLOY_TOPDIR = $(DEPLOY_TOPDIR)" >> $(MESSAGE_FILE)
endif
@@ -303,6 +314,9 @@
ifeq ($(JDK_SRC_AVAILABLE), true)
@$(ECHO) " BUILD_JDK = $(BUILD_JDK) " >> $(MESSAGE_FILE)
endif
+ifeq ($(NASHORN_SRC_AVAILABLE), true)
+ @$(ECHO) " BUILD_NASHORN = $(BUILD_NASHORN) " >> $(MESSAGE_FILE)
+endif
ifeq ($(DEPLOY_SRC_AVAILABLE), true)
@$(ECHO) " BUILD_DEPLOY = $(BUILD_DEPLOY) " >> $(MESSAGE_FILE)
endif
--- a/make/scripts/hgforest.sh Mon Mar 18 10:46:49 2013 -0400
+++ b/make/scripts/hgforest.sh Wed Jul 05 18:45:01 2017 +0200
@@ -40,7 +40,7 @@
repos=""
repos_extra=""
if [ "${command}" = "clone" -o "${command}" = "fclone" ] ; then
- subrepos="corba jaxp jaxws langtools jdk hotspot"
+ subrepos="corba jaxp jaxws langtools jdk hotspot nashorn"
if [ -f .hg/hgrc ] ; then
pull_default=`hg paths default`
if [ "${pull_default}" = "" ] ; then
--- a/make/scripts/webrev.ksh Mon Mar 18 10:46:49 2013 -0400
+++ b/make/scripts/webrev.ksh Wed Jul 05 18:45:01 2017 +0200
@@ -3023,7 +3023,7 @@
cleanup='s|\[#\(JDK-[0-9]\{5,\}\)\] \(.*\)|\1 : \2|'
fi
if [[ -n $WGET ]]; then
- msg=`$WGET --timeout=10 --tries=1 -q $url -O - | grep '<title>' | sed 's/<title>\(.*\)<\/title>/\1/' | sed "$cleanup"`
+ msg=`$WGET --timeout=10 --tries=1 -q $url -O - | grep '<title>' | sed 's/<title>\(.*\)<\/title>/\1/' | sed "$cleanup" | html_quote`
fi
if [[ -z $msg ]]; then
msg="${id}"