--- a/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -187,3 +187,5 @@
21ee1dd7b809639284900a128b9b656a592ebc7a jdk8-b63
70fa4b11f26522e69b51fd652215f60ce350bac3 jdk8-b64
a2cf4d4a484378caea2e827ed604b2bbae58bdba jdk8-b65
+17820b958ae84f7c1cc6719319c8e2232f7a4f1d jdk8-b66
+76cc9bd3ece407d3a15d3bea537b57927973c5e7 jdk8-b67
--- a/.hgtags-top-repo Mon Dec 17 08:28:27 2012 -0500
+++ b/.hgtags-top-repo Mon Dec 17 08:30:06 2012 -0500
@@ -187,3 +187,5 @@
3229597524cab4239325bc3602df6c486397a511 jdk8-b63
1c8370a55b305d35353346202bde042ba9e8a9fd jdk8-b64
b772de306dc24c17f7bd1398531ddeb58723b804 jdk8-b65
+13bb8c326e7b7b0b19d78c8088033e3932e3f7ca jdk8-b66
+9a6ec97ec45c1a62d5233cefa91e8390e380e13a jdk8-b67
--- a/common/autoconf/basics.m4 Mon Dec 17 08:28:27 2012 -0500
+++ b/common/autoconf/basics.m4 Mon Dec 17 08:30:06 2012 -0500
@@ -129,7 +129,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -141,20 +141,24 @@
if test "x$READLINK" != x; then
$1=`$READLINK -f [$]$1`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME [$]$1`
sym_link_file=`$BASENAME [$]$1`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -286,7 +290,7 @@
# Where is the source? It is located two levels above the configure script.
CURDIR="$PWD"
cd "$AUTOCONF_DIR/../.."
-SRC_ROOT="`pwd`"
+SRC_ROOT="`$THEPWDCMD`"
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
PATH_SEP=";"
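
The basics.m4 hunk above changes GNU readlink detection to look at --version output instead of --help, and reworks the fallback used when no usable readlink is available: directory symlinks are now resolved once up front with the system pwd command ($THEPWDCMD), and only file symlinks are chased in the loop. A minimal standalone sketch of that fallback, not part of the patch — /bin/pwd stands in for $THEPWDCMD and resolve_path is an illustrative helper:

  # Sketch: resolve a possibly-symlinked path without GNU "readlink -f".
  resolve_path() {
    dir=`dirname "$1"`; file=`basename "$1"`
    cd "$dir" && cd `/bin/pwd`            # system pwd resolves directory symlinks
    dir=`/bin/pwd`
    count=0
    while [ $count -lt 20 ]; do           # cap the depth to avoid symlink loops
      target=`ls -l "$dir/$file" | sed -n 's/.*-> \(.*\)/\1/p'`
      [ -z "$target" ] && break           # not a symlink any more: done
      cd `dirname "$target"`              # target may be relative to $dir
      dir=`/bin/pwd`
      file=`basename "$target"`
      count=`expr $count + 1`
    done
    echo "$dir/$file"
  }
  # Call it in a command substitution so the cd calls stay in a subshell,
  # which plays the role of the STARTDIR save/restore in the real code:
  #   abs=`resolve_path "$0"`
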
--- a/common/autoconf/generated-configure.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/common/autoconf/generated-configure.sh Mon Dec 17 08:30:06 2012 -0500
@@ -708,6 +708,7 @@
ac_ct_PROPER_COMPILER_CXX
PROPER_COMPILER_CXX
POTENTIAL_CXX
+TOOLS_DIR_CXX
OBJEXT
EXEEXT
ac_ct_CC
@@ -718,6 +719,7 @@
ac_ct_PROPER_COMPILER_CC
PROPER_COMPILER_CC
POTENTIAL_CC
+TOOLS_DIR_CC
BUILD_LD
BUILD_CXX
BUILD_CC
@@ -3672,7 +3674,7 @@
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1354106772
+DATE_WHEN_GENERATED=1355221914
###############################################################################
#
@@ -6903,7 +6905,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -6915,20 +6917,24 @@
if test "x$READLINK" != x; then
SCRIPT=`$READLINK -f $SCRIPT`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $SCRIPT`
sym_link_file=`$BASENAME $SCRIPT`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -6944,7 +6950,7 @@
# Where is the source? It is located two levels above the configure script.
CURDIR="$PWD"
cd "$AUTOCONF_DIR/../.."
-SRC_ROOT="`pwd`"
+SRC_ROOT="`$THEPWDCMD`"
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
PATH_SEP=";"
@@ -7593,7 +7599,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -7605,20 +7611,24 @@
if test "x$READLINK" != x; then
NOSYM_CURDIR=`$READLINK -f $NOSYM_CURDIR`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $NOSYM_CURDIR`
sym_link_file=`$BASENAME $NOSYM_CURDIR`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -11707,7 +11717,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -11719,20 +11729,24 @@
if test "x$READLINK" != x; then
BINARY=`$READLINK -f $BINARY`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $BINARY`
sym_link_file=`$BASENAME $BINARY`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -15827,7 +15841,14 @@
fi
if test -d "$WIN_SDK_BASE"; then
- if test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
+ # There have been cases of partial or broken SDK installations. A missing
+ # lib dir is not going to work.
+ if test ! -d "$WIN_SDK_BASE/../lib"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
+$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Warning: Installation is broken, lib dir is missing. Ignoring" >&5
+$as_echo "$as_me: Warning: Installation is broken, lib dir is missing. Ignoring" >&6;}
+ elif test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
VS_ENV_CMD="$WIN_SDK_BASE/SetEnv.Cmd"
@@ -15862,7 +15883,14 @@
fi
if test -d "$WIN_SDK_BASE"; then
- if test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
+ # There have been cases of partial or broken SDK installations. A missing
+ # lib dir is not going to work.
+ if test ! -d "$WIN_SDK_BASE/../lib"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
+$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Warning: Installation is broken, lib dir is missing. Ignoring" >&5
+$as_echo "$as_me: Warning: Installation is broken, lib dir is missing. Ignoring" >&6;}
+ elif test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
VS_ENV_CMD="$WIN_SDK_BASE/SetEnv.Cmd"
@@ -15897,7 +15925,14 @@
fi
if test -d "$WIN_SDK_BASE"; then
- if test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
+ # There have been cases of partial or broken SDK installations. A missing
+ # lib dir is not going to work.
+ if test ! -d "$WIN_SDK_BASE/../lib"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
+$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Warning: Installation is broken, lib dir is missing. Ignoring" >&5
+$as_echo "$as_me: Warning: Installation is broken, lib dir is missing. Ignoring" >&6;}
+ elif test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
VS_ENV_CMD="$WIN_SDK_BASE/SetEnv.Cmd"
@@ -15931,7 +15966,14 @@
fi
if test -d "$WIN_SDK_BASE"; then
- if test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
+ # There have been cases of partial or broken SDK installations. A missing
+ # lib dir is not going to work.
+ if test ! -d "$WIN_SDK_BASE/../lib"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
+$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Warning: Installation is broken, lib dir is missing. Ignoring" >&5
+$as_echo "$as_me: Warning: Installation is broken, lib dir is missing. Ignoring" >&6;}
+ elif test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
VS_ENV_CMD="$WIN_SDK_BASE/SetEnv.Cmd"
@@ -15964,7 +16006,14 @@
fi
if test -d "$WIN_SDK_BASE"; then
- if test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
+ # There have been cases of partial or broken SDK installations. A missing
+ # lib dir is not going to work.
+ if test ! -d "$WIN_SDK_BASE/../lib"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
+$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Warning: Installation is broken, lib dir is missing. Ignoring" >&5
+$as_echo "$as_me: Warning: Installation is broken, lib dir is missing. Ignoring" >&6;}
+ elif test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&5
$as_echo "$as_me: Found Windows SDK installation at $WIN_SDK_BASE using $METHOD" >&6;}
VS_ENV_CMD="$WIN_SDK_BASE/SetEnv.Cmd"
@@ -16434,8 +16483,9 @@
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
$as_echo "ok" >&6; }
- VS_INCLUDE="$INCLUDE"
- VS_LIB="$LIB"
+ # Remove any trailing \ from INCLUDE and LIB to avoid trouble in spec.gmk.
+ VS_INCLUDE=`$ECHO "$INCLUDE" | $SED 's/\\\\$//'`
+ VS_LIB=`$ECHO "$LIB" | $SED 's/\\\\$//'`
VS_PATH="$PATH"
@@ -17738,10 +17788,65 @@
COMPILER_NAME=C
- # Do a first initial attempt at searching the list of compiler names.
+ CC=
+ # If TOOLS_DIR is set, check for all compiler names in there first
+ # before checking the rest of the PATH.
+ if test -n "$TOOLS_DIR"; then
+ PATH_save="$PATH"
+ PATH="$TOOLS_DIR"
+ for ac_prog in $COMPILER_CHECK_LIST
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_TOOLS_DIR_CC+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $TOOLS_DIR_CC in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_TOOLS_DIR_CC="$TOOLS_DIR_CC" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_TOOLS_DIR_CC="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+TOOLS_DIR_CC=$ac_cv_path_TOOLS_DIR_CC
+if test -n "$TOOLS_DIR_CC"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TOOLS_DIR_CC" >&5
+$as_echo "$TOOLS_DIR_CC" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$TOOLS_DIR_CC" && break
+done
+
+ CC=$TOOLS_DIR_CC
+ PATH="$PATH_save"
+ fi
+
# AC_PATH_PROGS can't be run multiple times with the same variable,
# so create a new name for this run.
- for ac_prog in $COMPILER_CHECK_LIST
+ if test "x$CC" = x; then
+ for ac_prog in $COMPILER_CHECK_LIST
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
@@ -17786,9 +17891,10 @@
test -n "$POTENTIAL_CC" && break
done
- CC=$POTENTIAL_CC
-
- if test "x$$CC" = x; then
+ CC=$POTENTIAL_CC
+ fi
+
+ if test "x$CC" = x; then
# Print a helpful message on how to acquire the necessary build dependency.
# devkit is the help tag: freetyp2, cups, pulse, alsa etc
@@ -18082,7 +18188,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -18094,20 +18200,24 @@
if test "x$READLINK" != x; then
TEST_COMPILER=`$READLINK -f $TEST_COMPILER`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $TEST_COMPILER`
sym_link_file=`$BASENAME $TEST_COMPILER`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -18504,7 +18614,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -18516,20 +18626,24 @@
if test "x$READLINK" != x; then
PROPER_COMPILER_CC=`$READLINK -f $PROPER_COMPILER_CC`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $PROPER_COMPILER_CC`
sym_link_file=`$BASENAME $PROPER_COMPILER_CC`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -19221,10 +19335,65 @@
COMPILER_NAME=C++
- # Do a first initial attempt at searching the list of compiler names.
+ CXX=
+ # If TOOLS_DIR is set, check for all compiler names in there first
+ # before checking the rest of the PATH.
+ if test -n "$TOOLS_DIR"; then
+ PATH_save="$PATH"
+ PATH="$TOOLS_DIR"
+ for ac_prog in $COMPILER_CHECK_LIST
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_TOOLS_DIR_CXX+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $TOOLS_DIR_CXX in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_TOOLS_DIR_CXX="$TOOLS_DIR_CXX" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_TOOLS_DIR_CXX="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+TOOLS_DIR_CXX=$ac_cv_path_TOOLS_DIR_CXX
+if test -n "$TOOLS_DIR_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TOOLS_DIR_CXX" >&5
+$as_echo "$TOOLS_DIR_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$TOOLS_DIR_CXX" && break
+done
+
+ CXX=$TOOLS_DIR_CXX
+ PATH="$PATH_save"
+ fi
+
# AC_PATH_PROGS can't be run multiple times with the same variable,
# so create a new name for this run.
- for ac_prog in $COMPILER_CHECK_LIST
+ if test "x$CXX" = x; then
+ for ac_prog in $COMPILER_CHECK_LIST
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
@@ -19269,9 +19438,10 @@
test -n "$POTENTIAL_CXX" && break
done
- CXX=$POTENTIAL_CXX
-
- if test "x$$CXX" = x; then
+ CXX=$POTENTIAL_CXX
+ fi
+
+ if test "x$CXX" = x; then
# Print a helpful message on how to acquire the necessary build dependency.
# devkit is the help tag: freetyp2, cups, pulse, alsa etc
@@ -19565,7 +19735,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -19577,20 +19747,24 @@
if test "x$READLINK" != x; then
TEST_COMPILER=`$READLINK -f $TEST_COMPILER`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $TEST_COMPILER`
sym_link_file=`$BASENAME $TEST_COMPILER`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -19987,7 +20161,7 @@
if test "x$READLINK_TESTED" != yes; then
# On MacOSX there is a readlink tool with a different
# purpose than the GNU readlink tool. Check the found readlink.
- ISGNU=`$READLINK --help 2>&1 | $GREP GNU`
+ ISGNU=`$READLINK --version 2>&1 | $GREP GNU`
if test "x$ISGNU" = x; then
# A readlink that we do not know how to use.
# Are there other non-GNU readlinks out there?
@@ -19999,20 +20173,24 @@
if test "x$READLINK" != x; then
PROPER_COMPILER_CXX=`$READLINK -f $PROPER_COMPILER_CXX`
else
+ # Save the current directory for restoring afterwards
STARTDIR=$PWD
COUNTER=0
sym_link_dir=`$DIRNAME $PROPER_COMPILER_CXX`
sym_link_file=`$BASENAME $PROPER_COMPILER_CXX`
+ # Use the system pwd and not the shell builtin to resolve directory symlinks
+ cd $sym_link_dir
+ cd `$THEPWDCMD`
+ sym_link_dir=`$THEPWDCMD`
+ # Resolve file symlinks
while test $COUNTER -lt 20; do
ISLINK=`$LS -l $sym_link_dir/$sym_link_file | $GREP '\->' | $SED -e 's/.*-> \(.*\)/\1/'`
if test "x$ISLINK" == x; then
# This is not a symbolic link! We are done!
break
fi
- # The link might be relative! We have to use cd to travel safely.
- cd $sym_link_dir
- # ... and we must get the to the absolute path, not one using symbolic links.
- cd `pwd -P`
+ # Again resolve directory symlinks since the target of the just found
+ # link could be in a different directory
cd `$DIRNAME $ISLINK`
sym_link_dir=`$THEPWDCMD`
sym_link_file=`$BASENAME $ISLINK`
@@ -27642,34 +27820,34 @@
# ENABLE_DEBUG_SYMBOLS
# This must be done after the toolchain is setup, since we're looking at objcopy.
#
-ENABLE_DEBUG_SYMBOLS=default
-
-# default on macosx is no...
-if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- ENABLE_DEBUG_SYMBOLS=no
-fi
-
# Check whether --enable-debug-symbols was given.
if test "${enable_debug_symbols+set}" = set; then :
- enableval=$enable_debug_symbols; ENABLE_DEBUG_SYMBOLS=${enable_debug_symbols}
+ enableval=$enable_debug_symbols;
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if we should generate debug symbols" >&5
$as_echo_n "checking if we should generate debug symbols... " >&6; }
-if test "x$ENABLE_DEBUG_SYMBOLS" = "xyes" && test "x$OBJCOPY" = x; then
+if test "x$enable_debug_symbols" = "xyes" && test "x$OBJCOPY" = x; then
# explicit enabling of enable-debug-symbols and can't find objcopy
# this is an error
as_fn_error $? "Unable to find objcopy, cannot enable debug-symbols" "$LINENO" 5
fi
-if test "x$ENABLE_DEBUG_SYMBOLS" = "xdefault"; then
+if test "x$enable_debug_symbols" = "xyes"; then
+ ENABLE_DEBUG_SYMBOLS=true
+elif test "x$enable_debug_symbols" = "xno"; then
+ ENABLE_DEBUG_SYMBOLS=false
+else
+ # default on macosx is false
+ if test "x$OPENJDK_TARGET_OS" = xmacosx; then
+ ENABLE_DEBUG_SYMBOLS=false
# Default is on if objcopy is found, otherwise off
- if test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
- ENABLE_DEBUG_SYMBOLS=yes
- else
- ENABLE_DEBUG_SYMBOLS=no
+ elif test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
+ ENABLE_DEBUG_SYMBOLS=true
+ else
+ ENABLE_DEBUG_SYMBOLS=false
fi
fi
@@ -27679,25 +27857,21 @@
#
# ZIP_DEBUGINFO_FILES
#
-ZIP_DEBUGINFO_FILES=yes
-
# Check whether --enable-zip-debug-info was given.
if test "${enable_zip_debug_info+set}" = set; then :
- enableval=$enable_zip_debug_info; ZIP_DEBUGINFO_FILES=${enable_zip_debug_info}
+ enableval=$enable_zip_debug_info;
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if we should zip debug-info files" >&5
$as_echo_n "checking if we should zip debug-info files... " >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ZIP_DEBUGINFO_FILES" >&5
-$as_echo "$ZIP_DEBUGINFO_FILES" >&6; }
-
-# Hotspot wants ZIP_DEBUGINFO_FILES to be 1 for yes
-# use that...
-if test "x$ZIP_DEBUGINFO_FILES" = "xyes"; then
- ZIP_DEBUGINFO_FILES=1
-else
- ZIP_DEBUGINFO_FILES=0
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_zip_debug_info}" >&5
+$as_echo "${enable_zip_debug_info}" >&6; }
+
+if test "x${enable_zip_debug_info}" = "xno"; then
+ ZIP_DEBUGINFO_FILES=false
+else
+ ZIP_DEBUGINFO_FILES=true
fi
--- a/common/autoconf/hotspot-spec.gmk.in Mon Dec 17 08:28:27 2012 -0500
+++ b/common/autoconf/hotspot-spec.gmk.in Mon Dec 17 08:30:06 2012 -0500
@@ -97,6 +97,24 @@
USE_PRECOMPILED_HEADER=@USE_PRECOMPILED_HEADER@
+# Hotspot expects the variable FULL_DEBUG_SYMBOLS=1/0 to control debug symbols
+# creation.
+ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
+ FULL_DEBUG_SYMBOLS=1
+ # Ensure hotspot uses the objcopy that configure located
+ ALT_OBJCOPY:=$(OBJCOPY)
+else
+ FULL_DEBUG_SYMBOLS=0
+endif
+
+# Hotspot expects the variable ZIP_DEBUGINFO_FILES=1/0 and not true/false.
+ifeq ($(ZIP_DEBUGINFO_FILES)$(ENABLE_DEBUG_SYMBOLS), truetrue)
+ ZIP_DEBUGINFO_FILES:=1
+endif
+ifeq ($(ZIP_DEBUGINFO_FILES), false)
+ ZIP_DEBUGINFO_FILES:=0
+endif
+
# Sneak this in via the spec.gmk file, since we don't want to mess around too much with the Hotspot make files.
# This is needed to get the LOG setting to work properly.
include $(SRC_ROOT)/common/makefiles/MakeBase.gmk
--- a/common/autoconf/jdk-options.m4 Mon Dec 17 08:28:27 2012 -0500
+++ b/common/autoconf/jdk-options.m4 Mon Dec 17 08:30:06 2012 -0500
@@ -432,32 +432,30 @@
# ENABLE_DEBUG_SYMBOLS
# This must be done after the toolchain is setup, since we're looking at objcopy.
#
-ENABLE_DEBUG_SYMBOLS=default
-
-# default on macosx is no...
-if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- ENABLE_DEBUG_SYMBOLS=no
-fi
-
AC_ARG_ENABLE([debug-symbols],
- [AS_HELP_STRING([--disable-debug-symbols],[disable generation of debug symbols @<:@enabled@:>@])],
- [ENABLE_DEBUG_SYMBOLS=${enable_debug_symbols}],
-)
+ [AS_HELP_STRING([--disable-debug-symbols],[disable generation of debug symbols @<:@enabled@:>@])])
AC_MSG_CHECKING([if we should generate debug symbols])
-if test "x$ENABLE_DEBUG_SYMBOLS" = "xyes" && test "x$OBJCOPY" = x; then
+if test "x$enable_debug_symbols" = "xyes" && test "x$OBJCOPY" = x; then
# explicit enabling of enable-debug-symbols and can't find objcopy
# this is an error
AC_MSG_ERROR([Unable to find objcopy, cannot enable debug-symbols])
fi
-if test "x$ENABLE_DEBUG_SYMBOLS" = "xdefault"; then
+if test "x$enable_debug_symbols" = "xyes"; then
+ ENABLE_DEBUG_SYMBOLS=true
+elif test "x$enable_debug_symbols" = "xno"; then
+ ENABLE_DEBUG_SYMBOLS=false
+else
+ # default on macosx is false
+ if test "x$OPENJDK_TARGET_OS" = xmacosx; then
+ ENABLE_DEBUG_SYMBOLS=false
# Default is on if objcopy is found, otherwise off
- if test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
- ENABLE_DEBUG_SYMBOLS=yes
+ elif test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
+ ENABLE_DEBUG_SYMBOLS=true
else
- ENABLE_DEBUG_SYMBOLS=no
+ ENABLE_DEBUG_SYMBOLS=false
fi
fi
@@ -466,22 +464,16 @@
#
# ZIP_DEBUGINFO_FILES
#
-ZIP_DEBUGINFO_FILES=yes
-
AC_ARG_ENABLE([zip-debug-info],
- [AS_HELP_STRING([--disable-zip-debug-info],[disable zipping of debug-info files @<:@enabled@:>@])],
- [ZIP_DEBUGINFO_FILES=${enable_zip_debug_info}],
-)
+ [AS_HELP_STRING([--disable-zip-debug-info],[disable zipping of debug-info files @<:@enabled@:>@])])
AC_MSG_CHECKING([if we should zip debug-info files])
-AC_MSG_RESULT([$ZIP_DEBUGINFO_FILES])
+AC_MSG_RESULT([${enable_zip_debug_info}])
-# Hotspot wants ZIP_DEBUGINFO_FILES to be 1 for yes
-# use that...
-if test "x$ZIP_DEBUGINFO_FILES" = "xyes"; then
- ZIP_DEBUGINFO_FILES=1
+if test "x${enable_zip_debug_info}" = "xno"; then
+ ZIP_DEBUGINFO_FILES=false
else
- ZIP_DEBUGINFO_FILES=0
+ ZIP_DEBUGINFO_FILES=true
fi
AC_SUBST(ENABLE_DEBUG_SYMBOLS)
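
Condensed from the jdk-options.m4 hunks above: instead of pre-seeding ENABLE_DEBUG_SYMBOLS with a string default, the macro now branches on the raw enable_debug_symbols value, which AC_ARG_ENABLE leaves empty when the flag was not given, and normalizes the answer to true/false for the makefiles. The same three-state decision flattened into plain shell (error handling shortened for the sketch):

  # enable_debug_symbols is "yes", "no", or empty (option not passed).
  if test "x$enable_debug_symbols" = "xyes" && test "x$OBJCOPY" = x; then
    echo "Unable to find objcopy, cannot enable debug-symbols" >&2
    exit 1
  fi
  if test "x$enable_debug_symbols" = "xyes"; then
    ENABLE_DEBUG_SYMBOLS=true
  elif test "x$enable_debug_symbols" = "xno"; then
    ENABLE_DEBUG_SYMBOLS=false
  else
    # Default: off on macosx, otherwise on when objcopy was found (or on windows).
    if test "x$OPENJDK_TARGET_OS" = xmacosx; then
      ENABLE_DEBUG_SYMBOLS=false
    elif test "x$OBJCOPY" != x || test "x$OPENJDK_TARGET_OS" = xwindows; then
      ENABLE_DEBUG_SYMBOLS=true
    else
      ENABLE_DEBUG_SYMBOLS=false
    fi
  fi

ZIP_DEBUGINFO_FILES follows the same pattern, defaulting to true unless --disable-zip-debug-info was given, and hotspot-spec.gmk.in (earlier in this changeset) maps both true/false values back to the 1/0 form HotSpot expects.
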
--- a/common/autoconf/toolchain.m4 Mon Dec 17 08:28:27 2012 -0500
+++ b/common/autoconf/toolchain.m4 Mon Dec 17 08:30:06 2012 -0500
@@ -114,13 +114,25 @@
[
COMPILER_NAME=$2
- # Do a first initial attempt at searching the list of compiler names.
+ $1=
+ # If TOOLS_DIR is set, check for all compiler names in there first
+ # before checking the rest of the PATH.
+ if test -n "$TOOLS_DIR"; then
+ PATH_save="$PATH"
+ PATH="$TOOLS_DIR"
+ AC_PATH_PROGS(TOOLS_DIR_$1, $3)
+ $1=$TOOLS_DIR_$1
+ PATH="$PATH_save"
+ fi
+
# AC_PATH_PROGS can't be run multiple times with the same variable,
# so create a new name for this run.
- AC_PATH_PROGS(POTENTIAL_$1, $3)
- $1=$POTENTIAL_$1
+ if test "x[$]$1" = x; then
+ AC_PATH_PROGS(POTENTIAL_$1, $3)
+ $1=$POTENTIAL_$1
+ fi
- if test "x$[$]$1" = x; then
+ if test "x[$]$1" = x; then
HELP_MSG_MISSING_DEPENDENCY([devkit])
AC_MSG_ERROR([Could not find a $COMPILER_NAME compiler. $HELP_MSG])
fi
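
The toolchain.m4 macro above now searches TOOLS_DIR before the regular PATH by temporarily replacing PATH, and only falls back to a full-PATH search when nothing was found there. A rough shell equivalent of that two-stage lookup — find_compiler and the use of command -v in place of AC_PATH_PROGS are illustrative, not the macro itself:

  # Sketch: prefer compilers from TOOLS_DIR, then fall back to the normal PATH.
  find_compiler() {
    CC=
    if test -n "$TOOLS_DIR"; then
      PATH_save="$PATH"
      PATH="$TOOLS_DIR"                  # restrict the first pass to the devkit dir
      for name in $COMPILER_CHECK_LIST; do
        CC=`command -v "$name"` && break
      done
      PATH="$PATH_save"
    fi
    if test "x$CC" = x; then             # nothing in TOOLS_DIR: search the full PATH
      for name in $COMPILER_CHECK_LIST; do
        CC=`command -v "$name"` && break
      done
    fi
    if test "x$CC" = x; then
      echo "Could not find a $COMPILER_NAME compiler." >&2
      return 1
    fi
  }
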
--- a/common/autoconf/toolchain_windows.m4 Mon Dec 17 08:28:27 2012 -0500
+++ b/common/autoconf/toolchain_windows.m4 Mon Dec 17 08:30:06 2012 -0500
@@ -48,7 +48,12 @@
METHOD="$2"
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(WIN_SDK_BASE)
if test -d "$WIN_SDK_BASE"; then
- if test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
+ # There have been cases of partial or broken SDK installations. A missing
+ # lib dir is not going to work.
+ if test ! -d "$WIN_SDK_BASE/../lib"; then
+ AC_MSG_NOTICE([Found Windows SDK installation at $WIN_SDK_BASE using $METHOD])
+ AC_MSG_NOTICE([Warning: Installation is broken, lib dir is missing. Ignoring])
+ elif test -f "$WIN_SDK_BASE/SetEnv.Cmd"; then
AC_MSG_NOTICE([Found Windows SDK installation at $WIN_SDK_BASE using $METHOD])
VS_ENV_CMD="$WIN_SDK_BASE/SetEnv.Cmd"
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
@@ -200,8 +205,9 @@
AC_MSG_ERROR([Your VC command prompt seems broken, INCLUDE and/or LIB is missing.])
else
AC_MSG_RESULT([ok])
- VS_INCLUDE="$INCLUDE"
- VS_LIB="$LIB"
+ # Remove any trailing \ from INCLUDE and LIB to avoid trouble in spec.gmk.
+ VS_INCLUDE=`$ECHO "$INCLUDE" | $SED 's/\\\\$//'`
+ VS_LIB=`$ECHO "$LIB" | $SED 's/\\\\$//'`
VS_PATH="$PATH"
AC_SUBST(VS_INCLUDE)
AC_SUBST(VS_LIB)
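
Two Windows-specific fixes sit in the toolchain_windows.m4 hunks above: the SDK probe now skips installations whose ../lib directory is missing (a sign of a partial install), and VS_INCLUDE / VS_LIB have any trailing backslash stripped before they reach spec.gmk, where a trailing \ would otherwise escape the end of the line. The quadruple backslash in the patch is only the extra m4/autoconf quoting; at the shell level the stripping amounts to this (values illustrative):

  # Sketch: drop a single trailing backslash from a Windows-style path list.
  INCLUDE='C:\VC\include;C:\WinSDK\Include\'
  VS_INCLUDE=`printf '%s\n' "$INCLUDE" | sed 's/\\$//'`
  printf '%s\n' "$VS_INCLUDE"     # -> C:\VC\include;C:\WinSDK\Include
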
--- a/common/makefiles/JavaCompilation.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/common/makefiles/JavaCompilation.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -149,7 +149,7 @@
$$($1_GREP_INCLUDES) $$($1_GREP_EXCLUDES)) \
$$(subst \,,$$(foreach src,$$($1_SRCS),$$(addprefix $$(src)/,$$($1_EXTRA_FILES))))
ifeq (,$$($1_SKIP_METAINF))
- $1_DEPS+=$$(shell $(FIND) $$(addsuffix /META-INF,$$($1_SRCS)) -type f 2> /dev/null))
+ $1_DEPS+=$$(shell $(FIND) $$(addsuffix /META-INF,$$($1_SRCS)) -type f 2> /dev/null)
endif
endif
@@ -275,10 +275,12 @@
# Explicitly excluded files can be given with absolute path. The patsubst solution
# isn't perfect but the likelyhood of an absolute path to match something in a src
# dir is very small.
+ # If zip has nothing to do, it exits with code 12, which would otherwise fail
+ # the build. Check for 12 and only fail on any other non-zero exit code.
$$($1_ZIP) : $$($1_ALL_SRCS) $$($1_EXTRA_DEPS)
$(MKDIR) -p $$(@D)
$(ECHO) Updating $$($1_NAME)
- $$(foreach i,$$($1_SRC),(cd $$i && $(ZIP) -qru $$@ . $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* $$(addprefix -x$(SPACE),$$(patsubst $$i/%,%,$$($1_EXCLUDE_FILES))))$$(NEWLINE)) true
+ $$(foreach i,$$($1_SRC),(cd $$i && $(ZIP) -qru $$@ . $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* $$(addprefix -x$(SPACE),$$(patsubst $$i/%,%,$$($1_EXCLUDE_FILES))) || test "$$$$?" = "12" )$$(NEWLINE)) true
$(TOUCH) $$@
endef
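
Besides dropping a stray closing parenthesis from the META-INF find dependency, the JavaCompilation.gmk change stops treating zip's "nothing to do" result as a build failure: zip exits with 12 when the archive is already up to date, and the recipe now lets exactly that code pass. In a plain shell script the same guard looks like this (srcdir and archive are illustrative):

  # Sketch: accept zip exit code 12 ("nothing to do"), fail on any other error.
  ( cd "$srcdir" && zip -qru "$archive" . ) || test "$?" = "12" || exit 1

The $$$$? in the make recipe is this same $?, escaped once for the $(eval)/$(call) expansion of the template and once more when the recipe line itself is expanded.
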
--- a/common/makefiles/NativeCompilation.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/common/makefiles/NativeCompilation.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -302,7 +302,7 @@
endif
ifneq (,$$($1_DEBUG_SYMBOLS))
- ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
+ ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
# Programs don't get the debug symbols added in the old build. It's not clear if
# this is intentional.
ifeq ($$($1_PROGRAM),)
@@ -394,7 +394,7 @@
endif
ifneq (,$$($1_DEBUG_SYMBOLS))
- ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
+ ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
ifeq ($(OPENJDK_TARGET_OS), windows)
$1_EXTRA_LDFLAGS+="-pdb:$$($1_OBJECT_DIR)/$$($1_LIBRARY).pdb" \
"-map:$$($1_OBJECT_DIR)/$$($1_LIBRARY).map"
@@ -429,7 +429,7 @@
endif # Touch to not retrigger rule on rebuild
$(TOUCH) $$@
- ifeq ($(ZIP_DEBUGINFO_FILES), 1)
+ ifeq ($(ZIP_DEBUGINFO_FILES), true)
$1 += $$($1_OUTPUT_DIR)/$$(LIBRARY_PREFIX)$$($1_LIBRARY).diz
ifeq ($(OPENJDK_TARGET_OS), windows)
@@ -472,7 +472,7 @@
ifneq (,$$($1_PROGRAM))
# A executable binary has been specified, setup the target for it.
ifneq (,$$($1_DEBUG_SYMBOLS))
- ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
+ ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
ifeq ($(OPENJDK_TARGET_OS), windows)
$1_EXTRA_LDFLAGS+="-pdb:$$($1_OBJECT_DIR)/$$($1_PROGRAM).pdb" \
"-map:$$($1_OBJECT_DIR)/$$($1_PROGRAM).map"
@@ -507,7 +507,7 @@
endif
$(TOUCH) $$@
- ifeq ($(ZIP_DEBUGINFO_FILES), 1)
+ ifeq ($(ZIP_DEBUGINFO_FILES), true)
$1 += $$($1_OUTPUT_DIR)/$$($1_PROGRAM).diz
ifeq ($(OPENJDK_TARGET_OS), windows)
--- a/corba/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/corba/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -187,3 +187,5 @@
6ccbf67b68bfed1ab9c44ab8748a5bdc7df33506 jdk8-b63
54d599a5b4aad83c235d590652fc81f41c2824fb jdk8-b64
5132f7900a8f0c30c3ca7f7a32f9433f4fee7745 jdk8-b65
+65771ad1ca557ca26e4979d4dc633cf685435cb8 jdk8-b66
+394515ad2a55d4d54df990b36065505d3e7a3cbb jdk8-b67
--- a/hotspot/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -295,3 +295,7 @@
b4ee7b773144a88af8b6b92e4384dea82cb948d8 hs25-b09
0f7290a03b24bd562583fa325d3566c21c51fb94 jdk8-b65
cfc5309f03b7bd6c1567618b63cf1fc74c0f2a8f hs25-b10
+01684f7fee1b86222be69bc23841ec2a4416696c jdk8-b66
+b61d9c88b759d1594b8af1655598e8fa00393672 hs25-b11
+25bdce771bb3a7ae9825261a284d292cda700122 jdk8-b67
+a35a72dd2e1255239d31f796f9f693e49b36bc9f hs25-b12
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java Mon Dec 17 08:30:06 2012 -0500
@@ -48,6 +48,7 @@
private static int HAS_CHECKED_EXCEPTIONS;
private static int HAS_LOCALVARIABLE_TABLE;
private static int HAS_EXCEPTION_TABLE;
+ private static int HAS_GENERIC_SIGNATURE;
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("ConstMethod");
@@ -60,13 +61,14 @@
HAS_CHECKED_EXCEPTIONS = db.lookupIntConstant("ConstMethod::_has_checked_exceptions").intValue();
HAS_LOCALVARIABLE_TABLE = db.lookupIntConstant("ConstMethod::_has_localvariable_table").intValue();
HAS_EXCEPTION_TABLE = db.lookupIntConstant("ConstMethod::_has_exception_table").intValue();
+ HAS_GENERIC_SIGNATURE = db.lookupIntConstant("ConstMethod::_has_generic_signature").intValue();
// Size of Java bytecodes allocated immediately after ConstMethod*.
codeSize = new CIntField(type.getCIntegerField("_code_size"), 0);
nameIndex = new CIntField(type.getCIntegerField("_name_index"), 0);
signatureIndex = new CIntField(type.getCIntegerField("_signature_index"), 0);
- genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"),0);
idnum = new CIntField(type.getCIntegerField("_method_idnum"), 0);
+ maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
// start of byte code
bytecodeOffset = type.getSize();
@@ -92,8 +94,8 @@
private static CIntField codeSize;
private static CIntField nameIndex;
private static CIntField signatureIndex;
- private static CIntField genericSignatureIndex;
private static CIntField idnum;
+ private static CIntField maxStack;
// start of bytecode
private static long bytecodeOffset;
@@ -134,13 +136,21 @@
}
public long getGenericSignatureIndex() {
- return genericSignatureIndex.getValue(this);
+ if (hasGenericSignature()) {
+ return getAddress().getCIntegerAt(offsetOfGenericSignatureIndex(), 2, true);
+ } else {
+ return 0;
+ }
}
public long getIdNum() {
return idnum.getValue(this);
}
+ public long getMaxStack() {
+ return maxStack.getValue(this);
+ }
+
public Symbol getName() {
return getMethod().getName();
}
@@ -235,8 +245,8 @@
visitor.doCInt(codeSize, true);
visitor.doCInt(nameIndex, true);
visitor.doCInt(signatureIndex, true);
- visitor.doCInt(genericSignatureIndex, true);
visitor.doCInt(codeSize, true);
+ visitor.doCInt(maxStack, true);
}
// Accessors
@@ -353,6 +363,10 @@
return ret;
}
+ private boolean hasGenericSignature() {
+ return (getFlags() & HAS_GENERIC_SIGNATURE) != 0;
+ }
+
//---------------------------------------------------------------------------
// Internals only below this point
@@ -377,8 +391,14 @@
return getSize() * VM.getVM().getObjectHeap().getOopSize() - 2;
}
+ // Offset of the generic signature index
+ private long offsetOfGenericSignatureIndex() {
+ return offsetOfLastU2Element();
+ }
+
private long offsetOfCheckedExceptionsLength() {
- return offsetOfLastU2Element();
+ return hasGenericSignature() ? offsetOfLastU2Element() - 2 :
+ offsetOfLastU2Element();
}
private int getCheckedExceptionsLength() {
@@ -431,7 +451,8 @@
} else if (hasCheckedExceptions()) {
return offsetOfCheckedExceptions() - 2;
} else {
- return offsetOfLastU2Element();
+ return hasGenericSignature() ? offsetOfLastU2Element() - 2 :
+ offsetOfLastU2Element();
}
}
@@ -460,7 +481,8 @@
if (hasCheckedExceptions()) {
return offsetOfCheckedExceptions() - 2;
} else {
- return offsetOfLastU2Element();
+ return hasGenericSignature() ? offsetOfLastU2Element() - 2 :
+ offsetOfLastU2Element();
}
}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Mon Dec 17 08:30:06 2012 -0500
@@ -50,7 +50,6 @@
constMethod = type.getAddressField("_constMethod");
methodData = type.getAddressField("_method_data");
methodSize = new CIntField(type.getCIntegerField("_method_size"), 0);
- maxStack = new CIntField(type.getCIntegerField("_max_stack"), 0);
maxLocals = new CIntField(type.getCIntegerField("_max_locals"), 0);
sizeOfParameters = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
@@ -84,7 +83,6 @@
private static AddressField constMethod;
private static AddressField methodData;
private static CIntField methodSize;
- private static CIntField maxStack;
private static CIntField maxLocals;
private static CIntField sizeOfParameters;
private static CIntField accessFlags;
@@ -135,7 +133,7 @@
}
/** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
public long getMethodSize() { return methodSize.getValue(this); }
- public long getMaxStack() { return maxStack.getValue(this); }
+ public long getMaxStack() { return getConstMethod().getMaxStack(); }
public long getMaxLocals() { return maxLocals.getValue(this); }
public long getSizeOfParameters() { return sizeOfParameters.getValue(this); }
public long getNameIndex() { return getConstMethod().getNameIndex(); }
@@ -284,7 +282,6 @@
public void iterateFields(MetadataVisitor visitor) {
visitor.doCInt(methodSize, true);
- visitor.doCInt(maxStack, true);
visitor.doCInt(maxLocals, true);
visitor.doCInt(sizeOfParameters, true);
visitor.doCInt(accessFlags, true);
--- a/hotspot/make/hotspot_version Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/make/hotspot_version Mon Dec 17 08:30:06 2012 -0500
@@ -35,7 +35,7 @@
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=10
+HS_BUILD_NUMBER=12
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- a/hotspot/make/windows/projectfiles/common/Makefile Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/make/windows/projectfiles/common/Makefile Mon Dec 17 08:30:06 2012 -0500
@@ -71,41 +71,36 @@
!include $(HOTSPOTWORKSPACE)/make/hotspot_version
-!if "$(HOTSPOT_RELEASE_VERSION)" != ""
-HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)"
+!if "$(USER_RELEASE_SUFFIX)" != ""
+HOTSPOT_BUILD_VERSION = internal-$(USER_RELEASE_SUFFIX)
!else
-HOTSPOT_RELEASE_VERSION="$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)"
+HOTSPOT_BUILD_VERSION = internal
!endif
-!if "$(USER_RELEASE_SUFFIX)" != ""
-HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal-$(USER_RELEASE_SUFFIX)
+!if "$(HOTSPOT_RELEASE_VERSION)" != ""
+HOTSPOT_RELEASE_VERSION="\\\"$(HOTSPOT_RELEASE_VERSION)\\\""
!else
-HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal
-!endif
-!if "$(HOTSPOT_BUILD_VERSION)" != ""
-HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)"
+HOTSPOT_RELEASE_VERSION="\\\"$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)-$(HOTSPOT_BUILD_VERSION)\\\""
!endif
!if "$(JRE_RELEASE_VERSION)" != ""
-JRE_RELEASE_VERSION="$(JRE_RELEASE_VERSION)"
+JRE_RELEASE_VERSION="\\\"$(JRE_RELEASE_VERSION)\\\""
!else
-JRE_RELEASE_VERSION="$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)"
+JRE_RELEASE_VERSION="\\\"$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)\\\""
!endif
# Define HOTSPOT_VM_DISTRO if HOTSPOT_VM_DISTRO is set,
# and if it is not see if we have the src/closed directory
!if "$(HOTSPOT_VM_DISTRO)" != ""
-HOTSPOT_VM_DISTRO="$(HOTSPOT_VM_DISTRO)"
+HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)
!else
!if exists($(HOTSPOTWORKSPACE)\src\closed)
-HOTSPOT_VM_DISTRO="Java HotSpot(TM)"
+HOTSPOT_VM_DISTRO="\\\"Java HotSpot(TM)\\\""
!else
-HOTSPOT_VM_DISTRO="OpenJDK"
+HOTSPOT_VM_DISTRO="\\\"OpenJDK\\\""
!endif
!endif
-ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) \
- -define HOTSPOT_RELEASE_VERSION=\\\"$(HOTSPOT_RELEASE_VERSION)\\\" \
- -define JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \
- -define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\"
+ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -define JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) -define HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)
+ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
$(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,4985 +24,8 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
-#include "interpreter/interpreter.hpp"
-#include "memory/cardTableModRefBS.hpp"
-#include "memory/resourceArea.hpp"
-#include "prims/methodHandles.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/objectMonitor.hpp"
-#include "runtime/os.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#ifndef SERIALGC
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
-#endif
-
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) /* nothing */
-#define STOP(error) stop(error)
-#else
-#define BLOCK_COMMENT(str) block_comment(str)
-#define STOP(error) block_comment(error); stop(error)
-#endif
-
-// Convert the raw encoding form into the form expected by the
-// constructor for Address.
-Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
- assert(scale == 0, "not supported");
- RelocationHolder rspec;
- if (disp_reloc != relocInfo::none) {
- rspec = Relocation::spec_simple(disp_reloc);
- }
-
- Register rindex = as_Register(index);
- if (rindex != G0) {
- Address madr(as_Register(base), rindex);
- madr._rspec = rspec;
- return madr;
- } else {
- Address madr(as_Register(base), disp);
- madr._rspec = rspec;
- return madr;
- }
-}
-
-Address Argument::address_in_frame() const {
- // Warning: In LP64 mode disp will occupy more than 10 bits, but
- // op codes such as ld or ldx, only access disp() to get
- // their simm13 argument.
- int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
- if (is_in())
- return Address(FP, disp); // In argument.
- else
- return Address(SP, disp); // Out argument.
-}
-
-static const char* argumentNames[][2] = {
- {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
- {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
- {"A(n>9)","P(n>9)"}
-};
-
-const char* Argument::name() const {
- int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
- int num = number();
- if (num >= nofArgs) num = nofArgs - 1;
- return argumentNames[num][is_in() ? 1 : 0];
-}
-
-void Assembler::print_instruction(int inst) {
- const char* s;
- switch (inv_op(inst)) {
- default: s = "????"; break;
- case call_op: s = "call"; break;
- case branch_op:
- switch (inv_op2(inst)) {
- case fb_op2: s = "fb"; break;
- case fbp_op2: s = "fbp"; break;
- case br_op2: s = "br"; break;
- case bp_op2: s = "bp"; break;
- case cb_op2: s = "cb"; break;
- case bpr_op2: {
- if (is_cbcond(inst)) {
- s = is_cxb(inst) ? "cxb" : "cwb";
- } else {
- s = "bpr";
- }
- break;
- }
- default: s = "????"; break;
- }
- }
- ::tty->print("%s", s);
-}
-
-
-// Patch instruction inst at offset inst_pos to refer to dest_pos
-// and return the resulting instruction.
-// We should have pcs, not offsets, but since all is relative, it will work out
-// OK.
-int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
-
- int m; // mask for displacement field
- int v; // new value for displacement field
- const int word_aligned_ones = -4;
- switch (inv_op(inst)) {
- default: ShouldNotReachHere();
- case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
- case branch_op:
- switch (inv_op2(inst)) {
- case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
- case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
- case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
- case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
- case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
- case bpr_op2: {
- if (is_cbcond(inst)) {
- m = wdisp10(word_aligned_ones, 0);
- v = wdisp10(dest_pos, inst_pos);
- } else {
- m = wdisp16(word_aligned_ones, 0);
- v = wdisp16(dest_pos, inst_pos);
- }
- break;
- }
- default: ShouldNotReachHere();
- }
- }
- return inst & ~m | v;
-}
-
-// Return the offset of the branch destionation of instruction inst
-// at offset pos.
-// Should have pcs, but since all is relative, it works out.
-int Assembler::branch_destination(int inst, int pos) {
- int r;
- switch (inv_op(inst)) {
- default: ShouldNotReachHere();
- case call_op: r = inv_wdisp(inst, pos, 30); break;
- case branch_op:
- switch (inv_op2(inst)) {
- case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
- case bp_op2: r = inv_wdisp( inst, pos, 19); break;
- case fb_op2: r = inv_wdisp( inst, pos, 22); break;
- case br_op2: r = inv_wdisp( inst, pos, 22); break;
- case cb_op2: r = inv_wdisp( inst, pos, 22); break;
- case bpr_op2: {
- if (is_cbcond(inst)) {
- r = inv_wdisp10(inst, pos);
- } else {
- r = inv_wdisp16(inst, pos);
- }
- break;
- }
- default: ShouldNotReachHere();
- }
- }
- return r;
-}
+#include "asm/assembler.inline.hpp"
int AbstractAssembler::code_fill_byte() {
return 0x00; // illegal instruction 0x00000000
}
-
-Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
- switch (in) {
- case rc_z: return equal;
- case rc_lez: return lessEqual;
- case rc_lz: return less;
- case rc_nz: return notEqual;
- case rc_gz: return greater;
- case rc_gez: return greaterEqual;
- default:
- ShouldNotReachHere();
- }
- return equal;
-}
-
-// Generate a bunch 'o stuff (including v9's
-#ifndef PRODUCT
-void Assembler::test_v9() {
- add( G0, G1, G2 );
- add( G3, 0, G4 );
-
- addcc( G5, G6, G7 );
- addcc( I0, 1, I1 );
- addc( I2, I3, I4 );
- addc( I5, -1, I6 );
- addccc( I7, L0, L1 );
- addccc( L2, (1 << 12) - 2, L3 );
-
- Label lbl1, lbl2, lbl3;
-
- bind(lbl1);
-
- bpr( rc_z, true, pn, L4, pc(), relocInfo::oop_type );
- delayed()->nop();
- bpr( rc_lez, false, pt, L5, lbl1);
- delayed()->nop();
-
- fb( f_never, true, pc() + 4, relocInfo::none);
- delayed()->nop();
- fb( f_notEqual, false, lbl2 );
- delayed()->nop();
-
- fbp( f_notZero, true, fcc0, pn, pc() - 4, relocInfo::none);
- delayed()->nop();
- fbp( f_lessOrGreater, false, fcc1, pt, lbl3 );
- delayed()->nop();
-
- br( equal, true, pc() + 1024, relocInfo::none);
- delayed()->nop();
- br( lessEqual, false, lbl1 );
- delayed()->nop();
- br( never, false, lbl1 );
- delayed()->nop();
-
- bp( less, true, icc, pn, pc(), relocInfo::none);
- delayed()->nop();
- bp( lessEqualUnsigned, false, xcc, pt, lbl2 );
- delayed()->nop();
-
- call( pc(), relocInfo::none);
- delayed()->nop();
- call( lbl3 );
- delayed()->nop();
-
-
- casa( L6, L7, O0 );
- casxa( O1, O2, O3, 0 );
-
- udiv( O4, O5, O7 );
- udiv( G0, (1 << 12) - 1, G1 );
- sdiv( G1, G2, G3 );
- sdiv( G4, -((1 << 12) - 1), G5 );
- udivcc( G6, G7, I0 );
- udivcc( I1, -((1 << 12) - 2), I2 );
- sdivcc( I3, I4, I5 );
- sdivcc( I6, -((1 << 12) - 0), I7 );
-
- done();
- retry();
-
- fadd( FloatRegisterImpl::S, F0, F1, F2 );
- fsub( FloatRegisterImpl::D, F34, F0, F62 );
-
- fcmp( FloatRegisterImpl::Q, fcc0, F0, F60);
- fcmpe( FloatRegisterImpl::S, fcc1, F31, F30);
-
- ftox( FloatRegisterImpl::D, F2, F4 );
- ftoi( FloatRegisterImpl::Q, F4, F8 );
-
- ftof( FloatRegisterImpl::S, FloatRegisterImpl::Q, F3, F12 );
-
- fxtof( FloatRegisterImpl::S, F4, F5 );
- fitof( FloatRegisterImpl::D, F6, F8 );
-
- fmov( FloatRegisterImpl::Q, F16, F20 );
- fneg( FloatRegisterImpl::S, F6, F7 );
- fabs( FloatRegisterImpl::D, F10, F12 );
-
- fmul( FloatRegisterImpl::Q, F24, F28, F32 );
- fmul( FloatRegisterImpl::S, FloatRegisterImpl::D, F8, F9, F14 );
- fdiv( FloatRegisterImpl::S, F10, F11, F12 );
-
- fsqrt( FloatRegisterImpl::S, F13, F14 );
-
- flush( L0, L1 );
- flush( L2, -1 );
-
- flushw();
-
- illtrap( (1 << 22) - 2);
-
- impdep1( 17, (1 << 19) - 1 );
- impdep2( 3, 0 );
-
- jmpl( L3, L4, L5 );
- delayed()->nop();
- jmpl( L6, -1, L7, Relocation::spec_simple(relocInfo::none));
- delayed()->nop();
-
-
- ldf( FloatRegisterImpl::S, O0, O1, F15 );
- ldf( FloatRegisterImpl::D, O2, -1, F14 );
-
-
- ldfsr( O3, O4 );
- ldfsr( O5, -1 );
- ldxfsr( O6, O7 );
- ldxfsr( I0, -1 );
-
- ldfa( FloatRegisterImpl::D, I1, I2, 1, F16 );
- ldfa( FloatRegisterImpl::Q, I3, -1, F36 );
-
- ldsb( I4, I5, I6 );
- ldsb( I7, -1, G0 );
- ldsh( G1, G3, G4 );
- ldsh( G5, -1, G6 );
- ldsw( G7, L0, L1 );
- ldsw( L2, -1, L3 );
- ldub( L4, L5, L6 );
- ldub( L7, -1, O0 );
- lduh( O1, O2, O3 );
- lduh( O4, -1, O5 );
- lduw( O6, O7, G0 );
- lduw( G1, -1, G2 );
- ldx( G3, G4, G5 );
- ldx( G6, -1, G7 );
- ldd( I0, I1, I2 );
- ldd( I3, -1, I4 );
-
- ldsba( I5, I6, 2, I7 );
- ldsba( L0, -1, L1 );
- ldsha( L2, L3, 3, L4 );
- ldsha( L5, -1, L6 );
- ldswa( L7, O0, (1 << 8) - 1, O1 );
- ldswa( O2, -1, O3 );
- lduba( O4, O5, 0, O6 );
- lduba( O7, -1, I0 );
- lduha( I1, I2, 1, I3 );
- lduha( I4, -1, I5 );
- lduwa( I6, I7, 2, L0 );
- lduwa( L1, -1, L2 );
- ldxa( L3, L4, 3, L5 );
- ldxa( L6, -1, L7 );
- ldda( G0, G1, 4, G2 );
- ldda( G3, -1, G4 );
-
- ldstub( G5, G6, G7 );
- ldstub( O0, -1, O1 );
-
- ldstuba( O2, O3, 5, O4 );
- ldstuba( O5, -1, O6 );
-
- and3( I0, L0, O0 );
- and3( G7, -1, O7 );
- andcc( L2, I2, G2 );
- andcc( L4, -1, G4 );
- andn( I5, I6, I7 );
- andn( I6, -1, I7 );
- andncc( I5, I6, I7 );
- andncc( I7, -1, I6 );
- or3( I5, I6, I7 );
- or3( I7, -1, I6 );
- orcc( I5, I6, I7 );
- orcc( I7, -1, I6 );
- orn( I5, I6, I7 );
- orn( I7, -1, I6 );
- orncc( I5, I6, I7 );
- orncc( I7, -1, I6 );
- xor3( I5, I6, I7 );
- xor3( I7, -1, I6 );
- xorcc( I5, I6, I7 );
- xorcc( I7, -1, I6 );
- xnor( I5, I6, I7 );
- xnor( I7, -1, I6 );
- xnorcc( I5, I6, I7 );
- xnorcc( I7, -1, I6 );
-
- membar( Membar_mask_bits(StoreStore | LoadStore | StoreLoad | LoadLoad | Sync | MemIssue | Lookaside ) );
- membar( StoreStore );
- membar( LoadStore );
- membar( StoreLoad );
- membar( LoadLoad );
- membar( Sync );
- membar( MemIssue );
- membar( Lookaside );
-
- fmov( FloatRegisterImpl::S, f_ordered, true, fcc2, F16, F17 );
- fmov( FloatRegisterImpl::D, rc_lz, L5, F18, F20 );
-
- movcc( overflowClear, false, icc, I6, L4 );
- movcc( f_unorderedOrEqual, true, fcc2, (1 << 10) - 1, O0 );
-
- movr( rc_nz, I5, I6, I7 );
- movr( rc_gz, L1, -1, L2 );
-
- mulx( I5, I6, I7 );
- mulx( I7, -1, I6 );
- sdivx( I5, I6, I7 );
- sdivx( I7, -1, I6 );
- udivx( I5, I6, I7 );
- udivx( I7, -1, I6 );
-
- umul( I5, I6, I7 );
- umul( I7, -1, I6 );
- smul( I5, I6, I7 );
- smul( I7, -1, I6 );
- umulcc( I5, I6, I7 );
- umulcc( I7, -1, I6 );
- smulcc( I5, I6, I7 );
- smulcc( I7, -1, I6 );
-
- mulscc( I5, I6, I7 );
- mulscc( I7, -1, I6 );
-
- nop();
-
-
- popc( G0, G1);
- popc( -1, G2);
-
- prefetch( L1, L2, severalReads );
- prefetch( L3, -1, oneRead );
- prefetcha( O3, O2, 6, severalWritesAndPossiblyReads );
- prefetcha( G2, -1, oneWrite );
-
- rett( I7, I7);
- delayed()->nop();
- rett( G0, -1, relocInfo::none);
- delayed()->nop();
-
- save( I5, I6, I7 );
- save( I7, -1, I6 );
- restore( I5, I6, I7 );
- restore( I7, -1, I6 );
-
- saved();
- restored();
-
- sethi( 0xaaaaaaaa, I3, Relocation::spec_simple(relocInfo::none));
-
- sll( I5, I6, I7 );
- sll( I7, 31, I6 );
- srl( I5, I6, I7 );
- srl( I7, 0, I6 );
- sra( I5, I6, I7 );
- sra( I7, 30, I6 );
- sllx( I5, I6, I7 );
- sllx( I7, 63, I6 );
- srlx( I5, I6, I7 );
- srlx( I7, 0, I6 );
- srax( I5, I6, I7 );
- srax( I7, 62, I6 );
-
- sir( -1 );
-
- stbar();
-
- stf( FloatRegisterImpl::Q, F40, G0, I7 );
- stf( FloatRegisterImpl::S, F18, I3, -1 );
-
- stfsr( L1, L2 );
- stfsr( I7, -1 );
- stxfsr( I6, I5 );
- stxfsr( L4, -1 );
-
- stfa( FloatRegisterImpl::D, F22, I6, I7, 7 );
- stfa( FloatRegisterImpl::Q, F44, G0, -1 );
-
- stb( L5, O2, I7 );
- stb( I7, I6, -1 );
- sth( L5, O2, I7 );
- sth( I7, I6, -1 );
- stw( L5, O2, I7 );
- stw( I7, I6, -1 );
- stx( L5, O2, I7 );
- stx( I7, I6, -1 );
- std( L5, O2, I7 );
- std( I7, I6, -1 );
-
- stba( L5, O2, I7, 8 );
- stba( I7, I6, -1 );
- stha( L5, O2, I7, 9 );
- stha( I7, I6, -1 );
- stwa( L5, O2, I7, 0 );
- stwa( I7, I6, -1 );
- stxa( L5, O2, I7, 11 );
- stxa( I7, I6, -1 );
- stda( L5, O2, I7, 12 );
- stda( I7, I6, -1 );
-
- sub( I5, I6, I7 );
- sub( I7, -1, I6 );
- subcc( I5, I6, I7 );
- subcc( I7, -1, I6 );
- subc( I5, I6, I7 );
- subc( I7, -1, I6 );
- subccc( I5, I6, I7 );
- subccc( I7, -1, I6 );
-
- swap( I5, I6, I7 );
- swap( I7, -1, I6 );
-
- swapa( G0, G1, 13, G2 );
- swapa( I7, -1, I6 );
-
- taddcc( I5, I6, I7 );
- taddcc( I7, -1, I6 );
- taddcctv( I5, I6, I7 );
- taddcctv( I7, -1, I6 );
-
- tsubcc( I5, I6, I7 );
- tsubcc( I7, -1, I6 );
- tsubcctv( I5, I6, I7 );
- tsubcctv( I7, -1, I6 );
-
- trap( overflowClear, xcc, G0, G1 );
- trap( lessEqual, icc, I7, 17 );
-
- bind(lbl2);
- bind(lbl3);
-
- code()->decode();
-}
-
-// Generate a bunch 'o stuff unique to V8
-void Assembler::test_v8_onlys() {
- Label lbl1;
-
- cb( cp_0or1or2, false, pc() - 4, relocInfo::none);
- delayed()->nop();
- cb( cp_never, true, lbl1);
- delayed()->nop();
-
- cpop1(1, 2, 3, 4);
- cpop2(5, 6, 7, 8);
-
- ldc( I0, I1, 31);
- ldc( I2, -1, 0);
-
- lddc( I4, I4, 30);
- lddc( I6, 0, 1 );
-
- ldcsr( L0, L1, 0);
- ldcsr( L1, (1 << 12) - 1, 17 );
-
- stc( 31, L4, L5);
- stc( 30, L6, -(1 << 12) );
-
- stdc( 0, L7, G0);
- stdc( 1, G1, 0 );
-
- stcsr( 16, G2, G3);
- stcsr( 17, G4, 1 );
-
- stdcq( 4, G5, G6);
- stdcq( 5, G7, -1 );
-
- bind(lbl1);
-
- code()->decode();
-}
-#endif
-
-// Implementation of MacroAssembler
-
-void MacroAssembler::null_check(Register reg, int offset) {
- if (needs_explicit_null_check((intptr_t)offset)) {
- // provoke OS NULL exception if reg = NULL by
- // accessing M[reg] w/o changing any registers
- ld_ptr(reg, 0, G0);
- }
- else {
- // nothing to do, (later) access of M[reg + offset]
- // will provoke OS NULL exception if reg = NULL
- }
-}
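-
-// Illustrative sketch (offsets assumed): a small offset can rely on the
-// (later) access itself to fault, while an offset past the protected page
-// needs the explicit probe above, e.g.
-//   null_check(O0, 8);        // typically emits nothing
-//   null_check(O0, 1 << 20);  // emits ld_ptr(O0, 0, G0) to provoke the fault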
-
-// Ring buffer jumps
-
-#ifndef PRODUCT
-void MacroAssembler::ret( bool trace ) {
-  if (trace) {
-    mov(I7, O7); // traceable register
-    JMP(O7, 2 * BytesPerInstWord);
-  } else {
-    jmpl( I7, 2 * BytesPerInstWord, G0 );
-  }
-}
-
-void MacroAssembler::retl( bool trace ) {
-  if (trace) {
-    JMP(O7, 2 * BytesPerInstWord);
-  } else {
-    jmpl( O7, 2 * BytesPerInstWord, G0 );
-  }
-}
-#endif /* PRODUCT */
-
-
-void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
- assert_not_delayed();
- // This can only be traceable if r1 & r2 are visible after a window save
- if (TraceJumps) {
-#ifndef PRODUCT
- save_frame(0);
- verify_thread();
- ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
- add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
- sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
- add(O2, O1, O1);
-
- add(r1->after_save(), r2->after_save(), O2);
- set((intptr_t)file, O3);
- set(line, O4);
- Label L;
- // get nearby pc, store jmp target
- call(L, relocInfo::none); // No relocation for call to pc+0x8
- delayed()->st(O2, O1, 0);
- bind(L);
-
- // store nearby pc
- st(O7, O1, sizeof(intptr_t));
- // store file
- st(O3, O1, 2*sizeof(intptr_t));
- // store line
- st(O4, O1, 3*sizeof(intptr_t));
- add(O0, 1, O0);
- and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
- st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
- restore();
-#endif /* PRODUCT */
- }
- jmpl(r1, r2, G0);
-}
-void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
- assert_not_delayed();
- // This can only be traceable if r1 is visible after a window save
- if (TraceJumps) {
-#ifndef PRODUCT
- save_frame(0);
- verify_thread();
- ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
- add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
- sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
- add(O2, O1, O1);
-
- add(r1->after_save(), offset, O2);
- set((intptr_t)file, O3);
- set(line, O4);
- Label L;
- // get nearby pc, store jmp target
- call(L, relocInfo::none); // No relocation for call to pc+0x8
- delayed()->st(O2, O1, 0);
- bind(L);
-
- // store nearby pc
- st(O7, O1, sizeof(intptr_t));
- // store file
- st(O3, O1, 2*sizeof(intptr_t));
- // store line
- st(O4, O1, 3*sizeof(intptr_t));
- add(O0, 1, O0);
- and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
- st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
- restore();
-#endif /* PRODUCT */
- }
- jmp(r1, offset);
-}
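-
-// For reference, each TraceJumps ring entry written above is four words:
-//   [0] jump target   [1] nearby pc   [2] file   [3] line
-// and the index wraps via and3 with (JavaThread::jump_ring_buffer_size - 1),
-// which assumes the ring size is a power of two.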
-
-// This code sequence is relocatable to any address, even on LP64.
-void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
- assert_not_delayed();
- // Force fixed length sethi because NativeJump and NativeFarCall don't handle
- // variable length instruction streams.
- patchable_sethi(addrlit, temp);
- Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
- if (TraceJumps) {
-#ifndef PRODUCT
- // Must do the add here so relocation can find the remainder of the
- // value to be relocated.
- add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
- save_frame(0);
- verify_thread();
- ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
- add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
- sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
- add(O2, O1, O1);
-
- set((intptr_t)file, O3);
- set(line, O4);
- Label L;
-
- // get nearby pc, store jmp target
- call(L, relocInfo::none); // No relocation for call to pc+0x8
- delayed()->st(a.base()->after_save(), O1, 0);
- bind(L);
-
- // store nearby pc
- st(O7, O1, sizeof(intptr_t));
- // store file
- st(O3, O1, 2*sizeof(intptr_t));
- // store line
- st(O4, O1, 3*sizeof(intptr_t));
- add(O0, 1, O0);
- and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
- st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
- restore();
- jmpl(a.base(), G0, d);
-#else
- jmpl(a.base(), a.disp(), d);
-#endif /* PRODUCT */
- } else {
- jmpl(a.base(), a.disp(), d);
- }
-}
-
-void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
- jumpl(addrlit, temp, G0, offset, file, line);
-}
-
-
-// Conditional breakpoint (for assertion checks in assembly code)
-void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
- trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
-}
-
-// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
-void MacroAssembler::breakpoint_trap() {
- trap(ST_RESERVED_FOR_USER_0);
-}
-
-// flush windows (except current) using flushw instruction if avail.
-void MacroAssembler::flush_windows() {
- if (VM_Version::v9_instructions_work()) flushw();
- else flush_windows_trap();
-}
-
-// Write serialization page so VM thread can do a pseudo remote membar.
-// We use the current thread pointer to calculate a thread-specific
-// offset to write to within the page. This minimizes bus traffic
-// due to cache line collision.
-void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
- srl(thread, os::get_serialize_page_shift_count(), tmp2);
- if (Assembler::is_simm13(os::vm_page_size())) {
- and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
- }
- else {
- set((os::vm_page_size() - sizeof(int)), tmp1);
- and3(tmp2, tmp1, tmp2);
- }
- set(os::get_memory_serialize_page(), tmp1);
- st(G0, tmp1, tmp2);
-}
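-
-// Worked example (illustrative page size and shift count): with a 4K page
-// and a shift count of 3, thread == 0x12345678 gives
-//   offset = (0x12345678 >> 3) & (4096 - sizeof(int)) = 0x02468acf & 0xffc = 0xacc
-// so the G0 store above lands at page_base + 0xacc, a thread-specific slot.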
-
-
-
-void MacroAssembler::enter() {
- Unimplemented();
-}
-
-void MacroAssembler::leave() {
- Unimplemented();
-}
-
-void MacroAssembler::mult(Register s1, Register s2, Register d) {
- if(VM_Version::v9_instructions_work()) {
- mulx (s1, s2, d);
- } else {
- smul (s1, s2, d);
- }
-}
-
-void MacroAssembler::mult(Register s1, int simm13a, Register d) {
- if(VM_Version::v9_instructions_work()) {
- mulx (s1, simm13a, d);
- } else {
- smul (s1, simm13a, d);
- }
-}
-
-
-#ifdef ASSERT
-void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
- const Register s1 = G3_scratch;
- const Register s2 = G4_scratch;
- Label get_psr_test;
- // Get the condition codes the V8 way.
- read_ccr_trap(s1);
- mov(ccr_save, s2);
- // This is a test of V8 which has icc but not xcc
- // so mask off the xcc bits
- and3(s2, 0xf, s2);
- // Compare condition codes from the V8 and V9 ways.
- subcc(s2, s1, G0);
- br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
- delayed()->breakpoint_trap();
- bind(get_psr_test);
-}
-
-void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
- const Register s1 = G3_scratch;
- const Register s2 = G4_scratch;
- Label set_psr_test;
- // Write out the saved condition codes the V8 way
- write_ccr_trap(ccr_save, s1, s2);
- // Read back the condition codes using the V9 instruction
- rdccr(s1);
- mov(ccr_save, s2);
- // This is a test of V8 which has icc but not xcc
- // so mask off the xcc bits
- and3(s2, 0xf, s2);
- and3(s1, 0xf, s1);
- // Compare the V8 way with the V9 way.
- subcc(s2, s1, G0);
- br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
- delayed()->breakpoint_trap();
- bind(set_psr_test);
-}
-#else
-#define read_ccr_v8_assert(x)
-#define write_ccr_v8_assert(x)
-#endif // ASSERT
-
-void MacroAssembler::read_ccr(Register ccr_save) {
- if (VM_Version::v9_instructions_work()) {
- rdccr(ccr_save);
- // Test code sequence used on V8. Do not move above rdccr.
- read_ccr_v8_assert(ccr_save);
- } else {
- read_ccr_trap(ccr_save);
- }
-}
-
-void MacroAssembler::write_ccr(Register ccr_save) {
- if (VM_Version::v9_instructions_work()) {
- // Test code sequence used on V8. Do not move below wrccr.
- write_ccr_v8_assert(ccr_save);
- wrccr(ccr_save);
- } else {
- const Register temp_reg1 = G3_scratch;
- const Register temp_reg2 = G4_scratch;
- write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
- }
-}
-
-
-// Calls to C land
-
-#ifdef ASSERT
-// a hook for debugging
-static Thread* reinitialize_thread() {
- return ThreadLocalStorage::thread();
-}
-#else
-#define reinitialize_thread ThreadLocalStorage::thread
-#endif
-
-#ifdef ASSERT
-address last_get_thread = NULL;
-#endif
-
-// call this when G2_thread is not known to be valid
-void MacroAssembler::get_thread() {
- save_frame(0); // to avoid clobbering O0
- mov(G1, L0); // avoid clobbering G1
- mov(G5_method, L1); // avoid clobbering G5
- mov(G3, L2); // avoid clobbering G3 also
- mov(G4, L5); // avoid clobbering G4
-#ifdef ASSERT
- AddressLiteral last_get_thread_addrlit(&last_get_thread);
- set(last_get_thread_addrlit, L3);
- inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
- st_ptr(L4, L3, 0);
-#endif
- call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
- delayed()->nop();
- mov(L0, G1);
- mov(L1, G5_method);
- mov(L2, G3);
- mov(L5, G4);
- restore(O0, 0, G2_thread);
-}
-
-static Thread* verify_thread_subroutine(Thread* gthread_value) {
- Thread* correct_value = ThreadLocalStorage::thread();
- guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
- return correct_value;
-}
-
-void MacroAssembler::verify_thread() {
- if (VerifyThread) {
- // NOTE: this chops off the heads of the 64-bit O registers.
-#ifdef CC_INTERP
- save_frame(0);
-#else
- // make sure G2_thread contains the right value
- save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
- mov(G1, L1); // avoid clobbering G1
- // G2 saved below
- mov(G3, L3); // avoid clobbering G3
- mov(G4, L4); // avoid clobbering G4
- mov(G5_method, L5); // avoid clobbering G5_method
-#endif /* CC_INTERP */
-#if defined(COMPILER2) && !defined(_LP64)
- // Save & restore possible 64-bit Long arguments in G-regs
- srlx(G1,32,L0);
- srlx(G4,32,L6);
-#endif
- call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
- delayed()->mov(G2_thread, O0);
-
- mov(L1, G1); // Restore G1
- // G2 restored below
- mov(L3, G3); // restore G3
- mov(L4, G4); // restore G4
- mov(L5, G5_method); // restore G5_method
-#if defined(COMPILER2) && !defined(_LP64)
- // Save & restore possible 64-bit Long arguments in G-regs
- sllx(L0,32,G2); // Move old high G1 bits high in G2
- srl(G1, 0,G1); // Clear current high G1 bits
- or3 (G1,G2,G1); // Recover 64-bit G1
- sllx(L6,32,G2); // Move old high G4 bits high in G2
- srl(G4, 0,G4); // Clear current high G4 bits
- or3 (G4,G2,G4); // Recover 64-bit G4
-#endif
- restore(O0, 0, G2_thread);
- }
-}
-
-
-void MacroAssembler::save_thread(const Register thread_cache) {
- verify_thread();
- if (thread_cache->is_valid()) {
- assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
- mov(G2_thread, thread_cache);
- }
- if (VerifyThread) {
- // smash G2_thread, as if the VM were about to anyway
- set(0x67676767, G2_thread);
- }
-}
-
-
-void MacroAssembler::restore_thread(const Register thread_cache) {
- if (thread_cache->is_valid()) {
- assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
- mov(thread_cache, G2_thread);
- verify_thread();
- } else {
- // do it the slow way
- get_thread();
- }
-}
-
-
-// %%% maybe get rid of [re]set_last_Java_frame
-void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
- assert_not_delayed();
- Address flags(G2_thread, JavaThread::frame_anchor_offset() +
- JavaFrameAnchor::flags_offset());
- Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
-
-  // Always set last_Java_pc and flags first because, once last_Java_sp is visible,
-  // has_last_Java_frame is true and users will look at the rest of the fields.
-  // (Note: flags should always be zero before we get here, so it doesn't need to be set.)
-
-#ifdef ASSERT
-  // Verify that last_Java_pc was zeroed on return to Java
- Label PcOk;
- save_frame(0); // to avoid clobbering O0
- ld_ptr(pc_addr, L0);
- br_null_short(L0, Assembler::pt, PcOk);
- STOP("last_Java_pc not zeroed before leaving Java");
- bind(PcOk);
-
- // Verify that flags was zeroed on return to Java
- Label FlagsOk;
- ld(flags, L0);
- tst(L0);
- br(Assembler::zero, false, Assembler::pt, FlagsOk);
- delayed() -> restore();
- STOP("flags not zeroed before leaving Java");
- bind(FlagsOk);
-#endif /* ASSERT */
- //
-  // When returning from calling out from Java mode, the frame anchor's last_Java_pc
-  // will always be set to NULL. It is set here so that, if we are doing a call to
-  // native (not VM) code, we capture the known pc and don't have to rely on the
-  // native call having a standard frame linkage where we can find the pc.
-
- if (last_Java_pc->is_valid()) {
- st_ptr(last_Java_pc, pc_addr);
- }
-
-#ifdef _LP64
-#ifdef ASSERT
- // Make sure that we have an odd stack
- Label StackOk;
- andcc(last_java_sp, 0x01, G0);
- br(Assembler::notZero, false, Assembler::pt, StackOk);
- delayed()->nop();
- STOP("Stack Not Biased in set_last_Java_frame");
- bind(StackOk);
-#endif // ASSERT
- assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
- add( last_java_sp, STACK_BIAS, G4_scratch );
- st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
-#else
- st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
-#endif // _LP64
-}
-
-void MacroAssembler::reset_last_Java_frame(void) {
- assert_not_delayed();
-
- Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
- Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
- Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
-
-#ifdef ASSERT
- // check that it WAS previously set
-#ifdef CC_INTERP
- save_frame(0);
-#else
- save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
-#endif /* CC_INTERP */
- ld_ptr(sp_addr, L0);
- tst(L0);
- breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
- restore();
-#endif // ASSERT
-
- st_ptr(G0, sp_addr);
- // Always return last_Java_pc to zero
- st_ptr(G0, pc_addr);
- // Always null flags after return to Java
- st(G0, flags);
-}
-
-
-void MacroAssembler::call_VM_base(
- Register oop_result,
- Register thread_cache,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments,
- bool check_exceptions)
-{
- assert_not_delayed();
-
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = SP;
- }
- // debugging support
- assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
-
- // 64-bit last_java_sp is biased!
- set_last_Java_frame(last_java_sp, noreg);
- if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
- save_thread(thread_cache);
- // do the call
- call(entry_point, relocInfo::runtime_call_type);
- if (!VerifyThread)
- delayed()->mov(G2_thread, O0); // pass thread as first argument
- else
- delayed()->nop(); // (thread already passed)
- restore_thread(thread_cache);
- reset_last_Java_frame();
-
- // check for pending exceptions. use Gtemp as scratch register.
- if (check_exceptions) {
- check_and_forward_exception(Gtemp);
- }
-
-#ifdef ASSERT
- set(badHeapWordVal, G3);
- set(badHeapWordVal, G4);
- set(badHeapWordVal, G5);
-#endif
-
- // get oop result if there is one and reset the value in the thread
- if (oop_result->is_valid()) {
- get_vm_result(oop_result);
- }
-}
-
-void MacroAssembler::check_and_forward_exception(Register scratch_reg)
-{
- Label L;
-
- check_and_handle_popframe(scratch_reg);
- check_and_handle_earlyret(scratch_reg);
-
- Address exception_addr(G2_thread, Thread::pending_exception_offset());
- ld_ptr(exception_addr, scratch_reg);
- br_null_short(scratch_reg, pt, L);
- // we use O7 linkage so that forward_exception_entry has the issuing PC
- call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
- delayed()->nop();
- bind(L);
-}
-
-
-void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
-}
-
-
-void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
- call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
- // O0 is reserved for the thread
- mov(arg_1, O1);
- call_VM(oop_result, entry_point, 1, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
- // O0 is reserved for the thread
- mov(arg_1, O1);
- mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
- call_VM(oop_result, entry_point, 2, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
- // O0 is reserved for the thread
- mov(arg_1, O1);
- mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
- mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
- call_VM(oop_result, entry_point, 3, check_exceptions);
-}
-
-
-
-// Note: The following call_VM overloads are useful when a "save"
-// has already been performed by a stub, and the last Java frame is
-// the previous one. In that case, last_java_sp must be passed as FP
-// instead of SP.
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
- call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
- // O0 is reserved for the thread
- mov(arg_1, O1);
- call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
- // O0 is reserved for the thread
- mov(arg_1, O1);
- mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
- call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
- // O0 is reserved for the thread
- mov(arg_1, O1);
- mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
- mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
- call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
-}
-
-
-
-void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
- assert_not_delayed();
- save_thread(thread_cache);
- // do the call
- call(entry_point, relocInfo::runtime_call_type);
- delayed()->nop();
- restore_thread(thread_cache);
-#ifdef ASSERT
- set(badHeapWordVal, G3);
- set(badHeapWordVal, G4);
- set(badHeapWordVal, G5);
-#endif
-}
-
-
-void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
- call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
-}
-
-
-void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
- mov(arg_1, O0);
- call_VM_leaf(thread_cache, entry_point, 1);
-}
-
-
-void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
- mov(arg_1, O0);
- mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
- call_VM_leaf(thread_cache, entry_point, 2);
-}
-
-
-void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
- mov(arg_1, O0);
- mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
- mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
- call_VM_leaf(thread_cache, entry_point, 3);
-}
-
-
-void MacroAssembler::get_vm_result(Register oop_result) {
- verify_thread();
- Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
- ld_ptr( vm_result_addr, oop_result);
- st_ptr(G0, vm_result_addr);
- verify_oop(oop_result);
-}
-
-
-void MacroAssembler::get_vm_result_2(Register metadata_result) {
- verify_thread();
- Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
- ld_ptr(vm_result_addr_2, metadata_result);
- st_ptr(G0, vm_result_addr_2);
-}
-
-
-// We require that C code which does not return a value in vm_result will
-// leave it undisturbed.
-void MacroAssembler::set_vm_result(Register oop_result) {
- verify_thread();
- Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
- verify_oop(oop_result);
-
-# ifdef ASSERT
- // Check that we are not overwriting any other oop.
-#ifdef CC_INTERP
- save_frame(0);
-#else
- save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
-#endif /* CC_INTERP */
- ld_ptr(vm_result_addr, L0);
- tst(L0);
- restore();
- breakpoint_trap(notZero, Assembler::ptr_cc);
- // }
-# endif
-
- st_ptr(oop_result, vm_result_addr);
-}
-
-
-void MacroAssembler::ic_call(address entry, bool emit_delay) {
- RelocationHolder rspec = virtual_call_Relocation::spec(pc());
- patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
- relocate(rspec);
- call(entry, relocInfo::none);
- if (emit_delay) {
- delayed()->nop();
- }
-}
-
-
-void MacroAssembler::card_table_write(jbyte* byte_map_base,
- Register tmp, Register obj) {
-#ifdef _LP64
- srlx(obj, CardTableModRefBS::card_shift, obj);
-#else
- srl(obj, CardTableModRefBS::card_shift, obj);
-#endif
- assert(tmp != obj, "need separate temp reg");
- set((address) byte_map_base, tmp);
- stb(G0, tmp, obj);
-}
-
-
-void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
- address save_pc;
- int shiftcnt;
-#ifdef _LP64
-# ifdef CHECK_DELAY
- assert_not_delayed((char*) "cannot put two instructions in delay slot");
-# endif
- v9_dep();
- save_pc = pc();
-
- int msb32 = (int) (addrlit.value() >> 32);
- int lsb32 = (int) (addrlit.value());
-
- if (msb32 == 0 && lsb32 >= 0) {
- Assembler::sethi(lsb32, d, addrlit.rspec());
- }
- else if (msb32 == -1) {
- Assembler::sethi(~lsb32, d, addrlit.rspec());
- xor3(d, ~low10(~0), d);
- }
- else {
- Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
- if (msb32 & 0x3ff) // Any bits?
- or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32
- if (lsb32 & 0xFFFFFC00) { // done?
- if ((lsb32 >> 20) & 0xfff) { // Any bits set?
- sllx(d, 12, d); // Make room for next 12 bits
- or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12
- shiftcnt = 0; // We already shifted
- }
- else
- shiftcnt = 12;
- if ((lsb32 >> 10) & 0x3ff) {
- sllx(d, shiftcnt + 10, d); // Make room for last 10 bits
- or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10
- shiftcnt = 0;
- }
- else
- shiftcnt = 10;
- sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd
- }
- else
- sllx(d, 32, d);
- }
- // Pad out the instruction sequence so it can be patched later.
- if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
- addrlit.rtype() != relocInfo::runtime_call_type)) {
- while (pc() < (save_pc + (7 * BytesPerInstWord)))
- nop();
- }
-#else
- Assembler::sethi(addrlit.value(), d, addrlit.rspec());
-#endif
-}
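-
-// Worked example (illustrative constant): materializing 0x0000001234567890
-// takes the worst-case 7-instruction path above:
-//   sethi(0x12, d)               d = 0x0          (hi22 of 0x12 is 0)
-//   or3 (d, 0x012, d)            d = 0x12
-//   sllx(d, 12); or3(d, 0x345)   d = 0x12345
-//   sllx(d, 10); or3(d, 0x19e)   d = 0x48d159e
-//   sllx(d, 10)                  d = 0x1234567800
-// internal_set() then adds the low10 bits (0x90) to complete the value.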
-
-
-void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
- internal_sethi(addrlit, d, false);
-}
-
-
-void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
- internal_sethi(addrlit, d, true);
-}
-
-
-int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
-#ifdef _LP64
- if (worst_case) return 7;
- intptr_t iaddr = (intptr_t) a;
- int msb32 = (int) (iaddr >> 32);
- int lsb32 = (int) (iaddr);
- int count;
- if (msb32 == 0 && lsb32 >= 0)
- count = 1;
- else if (msb32 == -1)
- count = 2;
- else {
- count = 2;
- if (msb32 & 0x3ff)
- count++;
- if (lsb32 & 0xFFFFFC00 ) {
- if ((lsb32 >> 20) & 0xfff) count += 2;
- if ((lsb32 >> 10) & 0x3ff) count += 2;
- }
- }
- return count;
-#else
- return 1;
-#endif
-}
-
-int MacroAssembler::worst_case_insts_for_set() {
- return insts_for_sethi(NULL, true) + 1;
-}
-
-
-// Keep in sync with MacroAssembler::insts_for_internal_set
-void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
- intptr_t value = addrlit.value();
-
- if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
- // can optimize
- if (-4096 <= value && value <= 4095) {
- or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
- return;
- }
- if (inv_hi22(hi22(value)) == value) {
- sethi(addrlit, d);
- return;
- }
- }
- assert_not_delayed((char*) "cannot put two instructions in delay slot");
- internal_sethi(addrlit, d, ForceRelocatable);
- if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
- add(d, addrlit.low10(), d, addrlit.rspec());
- }
-}
-
-// Keep in sync with MacroAssembler::internal_set
-int MacroAssembler::insts_for_internal_set(intptr_t value) {
- // can optimize
- if (-4096 <= value && value <= 4095) {
- return 1;
- }
- if (inv_hi22(hi22(value)) == value) {
- return insts_for_sethi((address) value);
- }
- int count = insts_for_sethi((address) value);
- AddressLiteral al(value);
- if (al.low10() != 0) {
- count++;
- }
- return count;
-}
-
-void MacroAssembler::set(const AddressLiteral& al, Register d) {
- internal_set(al, d, false);
-}
-
-void MacroAssembler::set(intptr_t value, Register d) {
- AddressLiteral al(value);
- internal_set(al, d, false);
-}
-
-void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
- AddressLiteral al(addr, rspec);
- internal_set(al, d, false);
-}
-
-void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
- internal_set(al, d, true);
-}
-
-void MacroAssembler::patchable_set(intptr_t value, Register d) {
- AddressLiteral al(value);
- internal_set(al, d, true);
-}
-
-
-void MacroAssembler::set64(jlong value, Register d, Register tmp) {
- assert_not_delayed();
- v9_dep();
-
- int hi = (int)(value >> 32);
- int lo = (int)(value & ~0);
- // (Matcher::isSimpleConstant64 knows about the following optimizations.)
- if (Assembler::is_simm13(lo) && value == lo) {
- or3(G0, lo, d);
- } else if (hi == 0) {
- Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
- if (low10(lo) != 0)
- or3(d, low10(lo), d);
- }
- else if (hi == -1) {
- Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
- xor3(d, low10(lo) ^ ~low10(~0), d);
- }
- else if (lo == 0) {
- if (Assembler::is_simm13(hi)) {
- or3(G0, hi, d);
- } else {
- Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
- if (low10(hi) != 0)
- or3(d, low10(hi), d);
- }
- sllx(d, 32, d);
- }
- else {
- Assembler::sethi(hi, tmp);
- Assembler::sethi(lo, d); // macro assembler version sign-extends
- if (low10(hi) != 0)
- or3 (tmp, low10(hi), tmp);
- if (low10(lo) != 0)
- or3 ( d, low10(lo), d);
- sllx(tmp, 32, tmp);
- or3 (d, tmp, d);
- }
-}
-
-int MacroAssembler::insts_for_set64(jlong value) {
- v9_dep();
-
- int hi = (int) (value >> 32);
- int lo = (int) (value & ~0);
- int count = 0;
-
- // (Matcher::isSimpleConstant64 knows about the following optimizations.)
- if (Assembler::is_simm13(lo) && value == lo) {
- count++;
- } else if (hi == 0) {
- count++;
- if (low10(lo) != 0)
- count++;
- }
- else if (hi == -1) {
- count += 2;
- }
- else if (lo == 0) {
- if (Assembler::is_simm13(hi)) {
- count++;
- } else {
- count++;
- if (low10(hi) != 0)
- count++;
- }
- count++;
- }
- else {
- count += 2;
- if (low10(hi) != 0)
- count++;
- if (low10(lo) != 0)
- count++;
- count += 2;
- }
- return count;
-}
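-
-// Example (illustrative constant): set64(0x00000000cafebabe, d, tmp) falls
-// into the "hi == 0" case and emits two instructions,
-//   sethi(0xcafebabe, d);  or3(d, 0x2be, d);
-// which matches insts_for_set64(0x00000000cafebabe) == 2.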
-
-// compute size in bytes of sparc frame, given
-// number of extraWords
-int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
-
- int nWords = frame::memory_parameter_word_sp_offset;
-
- nWords += extraWords;
-
- if (nWords & 1) ++nWords; // round up to double-word
-
- return nWords * BytesPerWord;
-}
-
-
-// save_frame: given number of "extra" words in frame,
-// issue approp. save instruction (p 200, v8 manual)
-
-void MacroAssembler::save_frame(int extraWords) {
- int delta = -total_frame_size_in_bytes(extraWords);
- if (is_simm13(delta)) {
- save(SP, delta, SP);
- } else {
- set(delta, G3_scratch);
- save(SP, G3_scratch, SP);
- }
-}
-
-
-void MacroAssembler::save_frame_c1(int size_in_bytes) {
- if (is_simm13(-size_in_bytes)) {
- save(SP, -size_in_bytes, SP);
- } else {
- set(-size_in_bytes, G3_scratch);
- save(SP, G3_scratch, SP);
- }
-}
-
-
-void MacroAssembler::save_frame_and_mov(int extraWords,
- Register s1, Register d1,
- Register s2, Register d2) {
- assert_not_delayed();
-
- // The trick here is to use precisely the same memory word
- // that trap handlers also use to save the register.
- // This word cannot be used for any other purpose, but
- // it works fine to save the register's value, whether or not
- // an interrupt flushes register windows at any given moment!
- Address s1_addr;
- if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
- s1_addr = s1->address_in_saved_window();
- st_ptr(s1, s1_addr);
- }
-
- Address s2_addr;
- if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
- s2_addr = s2->address_in_saved_window();
- st_ptr(s2, s2_addr);
- }
-
- save_frame(extraWords);
-
- if (s1_addr.base() == SP) {
- ld_ptr(s1_addr.after_save(), d1);
- } else if (s1->is_valid()) {
- mov(s1->after_save(), d1);
- }
-
- if (s2_addr.base() == SP) {
- ld_ptr(s2_addr.after_save(), d2);
- } else if (s2->is_valid()) {
- mov(s2->after_save(), d2);
- }
-}
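-
-// Usage sketch (as in the callers above): carry Lmethod into the new window
-// without disturbing O0, then discard the frame when done:
-//   save_frame_and_mov(0, Lmethod, Lmethod);
-//   ...
-//   restore();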
-
-
-AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
- assert(oop_recorder() != NULL, "this assembler needs a Recorder");
- int index = oop_recorder()->allocate_metadata_index(obj);
- RelocationHolder rspec = metadata_Relocation::spec(index);
- return AddressLiteral((address)obj, rspec);
-}
-
-AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
- assert(oop_recorder() != NULL, "this assembler needs a Recorder");
- int index = oop_recorder()->find_index(obj);
- RelocationHolder rspec = metadata_Relocation::spec(index);
- return AddressLiteral((address)obj, rspec);
-}
-
-
-AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
- assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
- assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
- int oop_index = oop_recorder()->find_index(obj);
- return AddressLiteral(obj, oop_Relocation::spec(oop_index));
-}
-
-void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
- assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int oop_index = oop_recorder()->find_index(obj);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
-
- assert_not_delayed();
- // Relocation with special format (see relocInfo_sparc.hpp).
- relocate(rspec, 1);
- // Assembler::sethi(0x3fffff, d);
- emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
- // Don't add relocation for 'add'. Do patching during 'sethi' processing.
- add(d, 0x3ff, d);
-
-}
-
-void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
- assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int klass_index = oop_recorder()->find_index(k);
- RelocationHolder rspec = metadata_Relocation::spec(klass_index);
- narrowOop encoded_k = oopDesc::encode_klass(k);
-
- assert_not_delayed();
- // Relocation with special format (see relocInfo_sparc.hpp).
- relocate(rspec, 1);
- // Assembler::sethi(encoded_k, d);
- emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
- // Don't add relocation for 'add'. Do patching during 'sethi' processing.
- add(d, low10(encoded_k), d);
-
-}
-
-void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) nop();
-}
-
-
-void MacroAssembler::safepoint() {
- relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
-}
-
-
-void RegistersForDebugging::print(outputStream* s) {
- FlagSetting fs(Debugging, true);
- int j;
- for (j = 0; j < 8; ++j) {
- if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
- else { s->print( "fp = " ); os::print_location(s, i[j]); }
- }
- s->cr();
-
- for (j = 0; j < 8; ++j) {
- s->print("l%d = ", j); os::print_location(s, l[j]);
- }
- s->cr();
-
- for (j = 0; j < 8; ++j) {
- if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
- else { s->print( "sp = " ); os::print_location(s, o[j]); }
- }
- s->cr();
-
- for (j = 0; j < 8; ++j) {
- s->print("g%d = ", j); os::print_location(s, g[j]);
- }
- s->cr();
-
- // print out floats with compression
- for (j = 0; j < 32; ) {
- jfloat val = f[j];
- int last = j;
- for ( ; last+1 < 32; ++last ) {
- char b1[1024], b2[1024];
- sprintf(b1, "%f", val);
- sprintf(b2, "%f", f[last+1]);
- if (strcmp(b1, b2))
- break;
- }
- s->print("f%d", j);
- if ( j != last ) s->print(" - f%d", last);
- s->print(" = %f", val);
- s->fill_to(25);
-    s->print_cr(" (0x%x)", *(int*)&val);
- j = last + 1;
- }
- s->cr();
-
- // and doubles (evens only)
- for (j = 0; j < 32; ) {
- jdouble val = d[j];
- int last = j;
- for ( ; last+1 < 32; ++last ) {
- char b1[1024], b2[1024];
- sprintf(b1, "%f", val);
- sprintf(b2, "%f", d[last+1]);
- if (strcmp(b1, b2))
- break;
- }
- s->print("d%d", 2 * j);
- if ( j != last ) s->print(" - d%d", last);
- s->print(" = %f", val);
- s->fill_to(30);
- s->print("(0x%x)", *(int*)&val);
- s->fill_to(42);
- s->print_cr("(0x%x)", *(1 + (int*)&val));
- j = last + 1;
- }
- s->cr();
-}
-
-void RegistersForDebugging::save_registers(MacroAssembler* a) {
- a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
- a->flush_windows();
- int i;
- for (i = 0; i < 8; ++i) {
- a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
- a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
- a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
- a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
- }
- for (i = 0; i < 32; ++i) {
- a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
- }
- for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
- a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
- }
-}
-
-void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
- for (int i = 1; i < 8; ++i) {
- a->ld_ptr(r, g_offset(i), as_gRegister(i));
- }
- for (int j = 0; j < 32; ++j) {
- a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
- }
- for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
- a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
- }
-}
-
-
-// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
-void MacroAssembler::push_fTOS() {
- // %%%%%% need to implement this
-}
-
-// pops double TOS element from CPU stack and pushes on FPU stack
-void MacroAssembler::pop_fTOS() {
- // %%%%%% need to implement this
-}
-
-void MacroAssembler::empty_FPU_stack() {
- // %%%%%% need to implement this
-}
-
-void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
- // plausibility check for oops
- if (!VerifyOops) return;
-
- if (reg == G0) return; // always NULL, which is always an oop
-
- BLOCK_COMMENT("verify_oop {");
- char buffer[64];
-#ifdef COMPILER1
- if (CommentedAssembly) {
- snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
- block_comment(buffer);
- }
-#endif
-
- int len = strlen(file) + strlen(msg) + 1 + 4;
- sprintf(buffer, "%d", line);
- len += strlen(buffer);
- sprintf(buffer, " at offset %d ", offset());
- len += strlen(buffer);
- char * real_msg = new char[len];
- sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
-
- // Call indirectly to solve generation ordering problem
- AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
-
- // Make some space on stack above the current register window.
- // Enough to hold 8 64-bit registers.
- add(SP,-8*8,SP);
-
- // Save some 64-bit registers; a normal 'save' chops the heads off
- // of 64-bit longs in the 32-bit build.
- stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
- stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
- mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
- stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
-
- // Size of set() should stay the same
- patchable_set((intptr_t)real_msg, O1);
- // Load address to call to into O7
- load_ptr_contents(a, O7);
- // Register call to verify_oop_subroutine
- callr(O7, G0);
- delayed()->nop();
- // recover frame size
- add(SP, 8*8,SP);
- BLOCK_COMMENT("} verify_oop");
-}
-
-void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
- // plausibility check for oops
- if (!VerifyOops) return;
-
- char buffer[64];
- sprintf(buffer, "%d", line);
- int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
- sprintf(buffer, " at SP+%d ", addr.disp());
- len += strlen(buffer);
- char * real_msg = new char[len];
- sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
-
- // Call indirectly to solve generation ordering problem
- AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
-
- // Make some space on stack above the current register window.
- // Enough to hold 8 64-bit registers.
- add(SP,-8*8,SP);
-
- // Save some 64-bit registers; a normal 'save' chops the heads off
- // of 64-bit longs in the 32-bit build.
- stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
- stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
- ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
- stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
-
- // Size of set() should stay the same
- patchable_set((intptr_t)real_msg, O1);
- // Load address to call to into O7
- load_ptr_contents(a, O7);
- // Register call to verify_oop_subroutine
- callr(O7, G0);
- delayed()->nop();
- // recover frame size
- add(SP, 8*8,SP);
-}
-
-// side-door communication with signalHandler in os_solaris.cpp
-address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
-
-// This macro is expanded just once; it creates shared code. Contract:
-// receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
-// registers, including flags. May not use a register 'save', as this blows
-// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
-// call.
-void MacroAssembler::verify_oop_subroutine() {
- assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
-
- // Leaf call; no frame.
- Label succeed, fail, null_or_fail;
-
- // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
- // O0 is now the oop to be checked. O7 is the return address.
- Register O0_obj = O0;
-
- // Save some more registers for temps.
- stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
- stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
- stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
- stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
-
- // Save flags
- Register O5_save_flags = O5;
- rdccr( O5_save_flags );
-
- { // count number of verifies
- Register O2_adr = O2;
- Register O3_accum = O3;
- inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
- }
-
- Register O2_mask = O2;
- Register O3_bits = O3;
- Register O4_temp = O4;
-
- // mark lower end of faulting range
- assert(_verify_oop_implicit_branch[0] == NULL, "set once");
- _verify_oop_implicit_branch[0] = pc();
-
- // We can't check the mark oop because it could be in the process of
- // locking or unlocking while this is running.
- set(Universe::verify_oop_mask (), O2_mask);
- set(Universe::verify_oop_bits (), O3_bits);
-
- // assert((obj & oop_mask) == oop_bits);
- and3(O0_obj, O2_mask, O4_temp);
- cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);
-
- if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
- // the null_or_fail case is useless; must test for null separately
- br_null_short(O0_obj, pn, succeed);
- }
-
- // Check the Klass* of this object for being in the right area of memory.
-  // Cannot do the load in the delay slot above in case O0 is null
-  load_klass(O0_obj, O0_obj);
-  // assert(klass != NULL)
- br_null_short(O0_obj, pn, fail);
- // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
-
- wrccr( O5_save_flags ); // Restore CCR's
-
- // mark upper end of faulting range
- _verify_oop_implicit_branch[1] = pc();
-
- //-----------------------
- // all tests pass
- bind(succeed);
-
- // Restore prior 64-bit registers
- ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
- ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
- ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
- ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
- ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
- ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
-
- retl(); // Leaf return; restore prior O7 in delay slot
- delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
-
- //-----------------------
- bind(null_or_fail); // nulls are less common but OK
- br_null(O0_obj, false, pt, succeed);
- delayed()->wrccr( O5_save_flags ); // Restore CCR's
-
- //-----------------------
- // report failure:
- bind(fail);
- _verify_oop_implicit_branch[2] = pc();
-
- wrccr( O5_save_flags ); // Restore CCR's
-
- save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
-
- // stop_subroutine expects message pointer in I1.
- mov(I1, O1);
-
- // Restore prior 64-bit registers
- ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
- ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
- ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
- ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
- ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
- ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
-
- // factor long stop-sequence into subroutine to save space
- assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
-
- // call indirectly to solve generation ordering problem
- AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
- load_ptr_contents(al, O5);
- jmpl(O5, 0, O7);
- delayed()->nop();
-}
-
-
-void MacroAssembler::stop(const char* msg) {
- // save frame first to get O7 for return address
- // add one word to size in case struct is odd number of words long
- // It must be doubleword-aligned for storing doubles into it.
-
- save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
-
- // stop_subroutine expects message pointer in I1.
- // Size of set() should stay the same
- patchable_set((intptr_t)msg, O1);
-
- // factor long stop-sequence into subroutine to save space
- assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
-
- // call indirectly to solve generation ordering problem
- AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
- load_ptr_contents(a, O5);
- jmpl(O5, 0, O7);
- delayed()->nop();
-
- breakpoint_trap(); // make stop actually stop rather than writing
- // unnoticeable results in the output files.
-
- // restore(); done in callee to save space!
-}
-
-
-void MacroAssembler::warn(const char* msg) {
- save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
- RegistersForDebugging::save_registers(this);
- mov(O0, L0);
- // Size of set() should stay the same
- patchable_set((intptr_t)msg, O0);
- call( CAST_FROM_FN_PTR(address, warning) );
- delayed()->nop();
-// ret();
-// delayed()->restore();
- RegistersForDebugging::restore_registers(this, L0);
- restore();
-}
-
-
-void MacroAssembler::untested(const char* what) {
- // We must be able to turn interactive prompting off
- // in order to run automated test scripts on the VM
- // Use the flag ShowMessageBoxOnError
-
- char* b = new char[1024];
- sprintf(b, "untested: %s", what);
-
- if (ShowMessageBoxOnError) { STOP(b); }
- else { warn(b); }
-}
-
-
-void MacroAssembler::stop_subroutine() {
- RegistersForDebugging::save_registers(this);
-
- // for the sake of the debugger, stick a PC on the current frame
- // (this assumes that the caller has performed an extra "save")
- mov(I7, L7);
- add(O7, -7 * BytesPerInt, I7);
-
- save_frame(); // one more save to free up another O7 register
- mov(I0, O1); // addr of reg save area
-
- // We expect pointer to message in I1. Caller must set it up in O1
- mov(I1, O0); // get msg
- call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
- delayed()->nop();
-
- restore();
-
- RegistersForDebugging::restore_registers(this, O0);
-
- save_frame(0);
- call(CAST_FROM_FN_PTR(address,breakpoint));
- delayed()->nop();
- restore();
-
- mov(L7, I7);
- retl();
- delayed()->restore(); // see stop above
-}
-
-
-void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
- if ( ShowMessageBoxOnError ) {
- JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
- thread->set_thread_state(_thread_in_vm);
- {
-      // In order to get locks to work, we need to fake an in_VM state
- ttyLocker ttyl;
- ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
- if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
- BytecodeCounter::print();
- }
- if (os::message_box(msg, "Execution stopped, print registers?"))
- regs->print(::tty);
- }
- BREAKPOINT;
- ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
- }
- else {
- ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
- }
- assert(false, err_msg("DEBUG MESSAGE: %s", msg));
-}
-
-#ifndef PRODUCT
-void MacroAssembler::test() {
- ResourceMark rm;
-
- CodeBuffer cb("test", 10000, 10000);
- MacroAssembler* a = new MacroAssembler(&cb);
- VM_Version::allow_all();
- a->test_v9();
- a->test_v8_onlys();
- VM_Version::revert();
-
- StubRoutines::Sparc::test_stop_entry()();
-}
-#endif
-
-
-void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
- subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
- Label no_extras;
- br( negative, true, pt, no_extras ); // if neg, clear reg
-  delayed()->set(0, Rresult);  // annulled, so only if taken
- bind( no_extras );
-}
-
-
-void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
-#ifdef _LP64
- add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
-#else
- add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
-#endif
- bclr(1, Rresult);
- sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
-}
-
-
-void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
- calc_frame_size(Rextra_words, Rresult);
- neg(Rresult);
- save(SP, Rresult, SP);
-}
-
-
-// ---------------------------------------------------------
-Assembler::RCondition cond2rcond(Assembler::Condition c) {
- switch (c) {
- /*case zero: */
- case Assembler::equal: return Assembler::rc_z;
- case Assembler::lessEqual: return Assembler::rc_lez;
- case Assembler::less: return Assembler::rc_lz;
- /*case notZero:*/
- case Assembler::notEqual: return Assembler::rc_nz;
- case Assembler::greater: return Assembler::rc_gz;
- case Assembler::greaterEqual: return Assembler::rc_gez;
- }
- ShouldNotReachHere();
- return Assembler::rc_z;
-}
-
-// compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
-void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
- tst(s1);
- br (c, a, p, L);
-}
-
-// Compares a pointer register with zero and branches on null.
-// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
-void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
- assert_not_delayed();
-#ifdef _LP64
- bpr( rc_z, a, p, s1, L );
-#else
- tst(s1);
- br ( zero, a, p, L );
-#endif
-}
-
-void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
- assert_not_delayed();
-#ifdef _LP64
- bpr( rc_nz, a, p, s1, L );
-#else
- tst(s1);
- br ( notZero, a, p, L );
-#endif
-}
-
-// Compare registers and branch with nop in delay slot or cbcond without delay slot.
-
-// Compare integer (32 bit) values (icc only).
-void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
- Predict p, Label& L) {
- assert_not_delayed();
- if (use_cbcond(L)) {
- Assembler::cbcond(c, icc, s1, s2, L);
- } else {
- cmp(s1, s2);
- br(c, false, p, L);
- delayed()->nop();
- }
-}
-
-// Compare integer (32 bit) values (icc only).
-void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
- Predict p, Label& L) {
- assert_not_delayed();
- if (is_simm(simm13a,5) && use_cbcond(L)) {
- Assembler::cbcond(c, icc, s1, simm13a, L);
- } else {
- cmp(s1, simm13a);
- br(c, false, p, L);
- delayed()->nop();
- }
-}
-
-// Branch that tests xcc in LP64 and icc in !LP64
-void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
- Predict p, Label& L) {
- assert_not_delayed();
- if (use_cbcond(L)) {
- Assembler::cbcond(c, ptr_cc, s1, s2, L);
- } else {
- cmp(s1, s2);
- brx(c, false, p, L);
- delayed()->nop();
- }
-}
-
-// Branch that tests xcc in LP64 and icc in !LP64
-void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
- Predict p, Label& L) {
- assert_not_delayed();
- if (is_simm(simm13a,5) && use_cbcond(L)) {
- Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
- } else {
- cmp(s1, simm13a);
- brx(c, false, p, L);
- delayed()->nop();
- }
-}
-
-// Short branch version for compares a pointer with zero.
-
-void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
- assert_not_delayed();
- if (use_cbcond(L)) {
- Assembler::cbcond(zero, ptr_cc, s1, 0, L);
- return;
- }
- br_null(s1, false, p, L);
- delayed()->nop();
-}
-
-void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
- assert_not_delayed();
- if (use_cbcond(L)) {
- Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
- return;
- }
- br_notnull(s1, false, p, L);
- delayed()->nop();
-}
-
-// Unconditional short branch
-void MacroAssembler::ba_short(Label& L) {
- if (use_cbcond(L)) {
- Assembler::cbcond(equal, icc, G0, G0, L);
- return;
- }
- br(always, false, pt, L);
- delayed()->nop();
-}
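-
-// Usage sketch (registers and labels assumed): the short forms emit either a
-// single cbcond or a compare/branch plus a nop in the delay slot, so the
-// caller never supplies a delay instruction:
-//   cmp_and_br_short(O0, O1, Assembler::equal, Assembler::pt, L_equal);
-//   br_null_short(O2, Assembler::pn, L_is_null);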
-
-// instruction sequences factored across compiler & interpreter
-
-
-void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
- Register Rb_hi, Register Rb_low,
- Register Rresult) {
-
- Label check_low_parts, done;
-
- cmp(Ra_hi, Rb_hi ); // compare hi parts
- br(equal, true, pt, check_low_parts);
- delayed()->cmp(Ra_low, Rb_low); // test low parts
-
- // And, with an unsigned comparison, it does not matter if the numbers
- // are negative or not.
- // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
- // The second one is bigger (unsignedly).
-
- // Other notes: The first move in each triplet can be unconditional
- // (and therefore probably prefetchable).
- // And the equals case for the high part does not need testing,
- // since that triplet is reached only after finding the high halves differ.
-
- if (VM_Version::v9_instructions_work()) {
- mov(-1, Rresult);
- ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
- } else {
- br(less, true, pt, done); delayed()-> set(-1, Rresult);
- br(greater, true, pt, done); delayed()-> set( 1, Rresult);
- }
-
- bind( check_low_parts );
-
- if (VM_Version::v9_instructions_work()) {
- mov( -1, Rresult);
- movcc(equal, false, icc, 0, Rresult);
- movcc(greaterUnsigned, false, icc, 1, Rresult);
- } else {
- set(-1, Rresult);
- br(equal, true, pt, done); delayed()->set( 0, Rresult);
- br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
- }
- bind( done );
-}
-
-void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
- subcc( G0, Rlow, Rlow );
- subc( G0, Rhi, Rhi );
-}
-
-void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
- Register Rcount,
- Register Rout_high, Register Rout_low,
- Register Rtemp ) {
-
-
- Register Ralt_count = Rtemp;
- Register Rxfer_bits = Rtemp;
-
- assert( Ralt_count != Rin_high
- && Ralt_count != Rin_low
- && Ralt_count != Rcount
- && Rxfer_bits != Rin_low
- && Rxfer_bits != Rin_high
- && Rxfer_bits != Rcount
- && Rxfer_bits != Rout_low
- && Rout_low != Rin_high,
- "register alias checks");
-
- Label big_shift, done;
-
- // This code can be optimized to use the 64 bit shifts in V9.
- // Here we use the 32 bit shifts.
-
- and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
- subcc(Rcount, 31, Ralt_count);
- br(greater, true, pn, big_shift);
- delayed()->dec(Ralt_count);
-
- // shift < 32 bits, Ralt_count = Rcount-31
-
-  // We get the transfer bits by shifting the low register right by 32-count.
-  // This is done by shifting right by 31-count and then by one more to take
-  // care of the special (rare) case where count is zero (shifting by 32
-  // would not work).
-
- neg(Ralt_count);
-
- // The order of the next two instructions is critical in the case where
- // Rin and Rout are the same and should not be reversed.
-
- srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
- if (Rcount != Rout_low) {
- sll(Rin_low, Rcount, Rout_low); // low half
- }
- sll(Rin_high, Rcount, Rout_high);
- if (Rcount == Rout_low) {
- sll(Rin_low, Rcount, Rout_low); // low half
- }
- srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
- ba(done);
- delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
-
- // shift >= 32 bits, Ralt_count = Rcount-32
- bind(big_shift);
- sll(Rin_low, Ralt_count, Rout_high );
- clr(Rout_low);
-
- bind(done);
-}
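-
-// Worked example (illustrative operands): shifting 0x0000000180000000 left
-// by 4 takes the "shift < 32" path above: Rxfer_bits = srl(0x80000000, 27)
-// then srl by 1 more = 0x8, Rout_low = 0x80000000 << 4 = 0x0, and
-// Rout_high = (0x00000001 << 4) | 0x8 = 0x18, giving 0x0000001800000000.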
-
-
-void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
- Register Rcount,
- Register Rout_high, Register Rout_low,
- Register Rtemp ) {
-
- Register Ralt_count = Rtemp;
- Register Rxfer_bits = Rtemp;
-
- assert( Ralt_count != Rin_high
- && Ralt_count != Rin_low
- && Ralt_count != Rcount
- && Rxfer_bits != Rin_low
- && Rxfer_bits != Rin_high
- && Rxfer_bits != Rcount
- && Rxfer_bits != Rout_high
- && Rout_high != Rin_low,
- "register alias checks");
-
- Label big_shift, done;
-
- // This code can be optimized to use the 64 bit shifts in V9.
- // Here we use the 32 bit shifts.
-
- and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
- subcc(Rcount, 31, Ralt_count);
- br(greater, true, pn, big_shift);
- delayed()->dec(Ralt_count);
-
- // shift < 32 bits, Ralt_count = Rcount-31
-
-  // We get the transfer bits by shifting the high register left by 32-count.
-  // This is done by shifting left by 31-count and then by one more to take
-  // care of the special (rare) case where count is zero (shifting by 32
-  // would not work).
-
- neg(Ralt_count);
- if (Rcount != Rout_low) {
- srl(Rin_low, Rcount, Rout_low);
- }
-
- // The order of the next two instructions is critical in the case where
- // Rin and Rout are the same and should not be reversed.
-
- sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
- sra(Rin_high, Rcount, Rout_high ); // high half
- sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
- if (Rcount == Rout_low) {
- srl(Rin_low, Rcount, Rout_low);
- }
- ba(done);
- delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
-
- // shift >= 32 bits, Ralt_count = Rcount-32
- bind(big_shift);
-
- sra(Rin_high, Ralt_count, Rout_low);
- sra(Rin_high, 31, Rout_high); // sign into hi
-
- bind( done );
-}
-
-
-
-void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
- Register Rcount,
- Register Rout_high, Register Rout_low,
- Register Rtemp ) {
-
- Register Ralt_count = Rtemp;
- Register Rxfer_bits = Rtemp;
-
- assert( Ralt_count != Rin_high
- && Ralt_count != Rin_low
- && Ralt_count != Rcount
- && Rxfer_bits != Rin_low
- && Rxfer_bits != Rin_high
- && Rxfer_bits != Rcount
- && Rxfer_bits != Rout_high
- && Rout_high != Rin_low,
- "register alias checks");
-
- Label big_shift, done;
-
- // This code can be optimized to use the 64 bit shifts in V9.
- // Here we use the 32 bit shifts.
-
- and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
- subcc(Rcount, 31, Ralt_count);
- br(greater, true, pn, big_shift);
- delayed()->dec(Ralt_count);
-
- // shift < 32 bits, Ralt_count = Rcount-31
-
-  // We get the transfer bits by shifting the high register left by 32-count.
-  // This is done by shifting left by 31-count and then by one more to take
-  // care of the special (rare) case where count is zero (shifting by 32
-  // would not work).
-
- neg(Ralt_count);
- if (Rcount != Rout_low) {
- srl(Rin_low, Rcount, Rout_low);
- }
-
- // The order of the next two instructions is critical in the case where
- // Rin and Rout are the same and should not be reversed.
-
- sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
- srl(Rin_high, Rcount, Rout_high ); // high half
- sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
- if (Rcount == Rout_low) {
- srl(Rin_low, Rcount, Rout_low);
- }
- ba(done);
- delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
-
- // shift >= 32 bits, Ralt_count = Rcount-32
- bind(big_shift);
-
- srl(Rin_high, Ralt_count, Rout_low);
- clr(Rout_high);
-
- bind( done );
-}
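For symmetry, here is the corresponding standalone sketch of the logical right shift (lushr); lshr differs only in using arithmetic shifts and sign-filling the high half once the count reaches 32 (again illustrative, not the emitted code):

    #include <cstdint>

    static uint64_t lushr_sketch(uint32_t hi, uint32_t lo, unsigned count) {
      count &= 0x3f;                                // least significant 6 bits
      uint32_t out_hi, out_lo;
      if (count < 32) {
        uint32_t xfer = (hi << (31 - count)) << 1;  // bits moving high -> low
        out_lo = (lo >> count) | xfer;
        out_hi = hi >> count;
      } else {                                      // shift >= 32
        out_lo = hi >> (count - 32);
        out_hi = 0;                                 // lshr would sign-fill here
      }
      return ((uint64_t)out_hi << 32) | out_lo;
    }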
-
-#ifdef _LP64
-void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
- cmp(Ra, Rb);
- mov(-1, Rresult);
- movcc(equal, false, xcc, 0, Rresult);
- movcc(greater, false, xcc, 1, Rresult);
-}
-#endif
-
-
-void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
- switch (size_in_bytes) {
- case 8: ld_long(src, dst); break;
- case 4: ld( src, dst); break;
- case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
- case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
- default: ShouldNotReachHere();
- }
-}
-
-void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
- switch (size_in_bytes) {
- case 8: st_long(src, dst); break;
- case 4: st( src, dst); break;
- case 2: sth( src, dst); break;
- case 1: stb( src, dst); break;
- default: ShouldNotReachHere();
- }
-}
-
-
-void MacroAssembler::float_cmp( bool is_float, int unordered_result,
- FloatRegister Fa, FloatRegister Fb,
- Register Rresult) {
-
- fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
-
- Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
- Condition eq = f_equal;
- Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
-
- if (VM_Version::v9_instructions_work()) {
-
- mov(-1, Rresult);
- movcc(eq, true, fcc0, 0, Rresult);
- movcc(gt, true, fcc0, 1, Rresult);
-
- } else {
- Label done;
-
- set( -1, Rresult );
- //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
- fb( eq, true, pn, done); delayed()->set( 0, Rresult );
- fb( gt, true, pn, done); delayed()->set( 1, Rresult );
-
- bind (done);
- }
-}
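The contract of float_cmp() is the usual three-way compare with a caller-chosen answer for unordered operands; a plain C++ restatement (ignoring the single/double register details) may make the unordered_result parameter clearer:

    // Returns -1, 0 or 1, with NaN comparisons folded to unordered_result (+/-1).
    static int float_cmp_sketch(double a, double b, int unordered_result) {
      if (a != a || b != b) return unordered_result;  // NaN involved: unordered
      if (a < b)  return -1;
      if (a == b) return  0;
      return 1;
    }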
-
-
-void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
- if (VM_Version::v9_instructions_work()) {
- Assembler::fneg(w, s, d);
- } else {
- if (w == FloatRegisterImpl::S) {
- Assembler::fneg(w, s, d);
- } else if (w == FloatRegisterImpl::D) {
- // number() does a sanity check on the alignment.
- assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
- ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
- Assembler::fneg(FloatRegisterImpl::S, s, d);
- Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
- } else {
- assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
- // number() does a sanity check on the alignment.
- assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
- ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
- Assembler::fneg(FloatRegisterImpl::S, s, d);
- Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
- Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
- Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
- }
- }
-}
-
-void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
- if (VM_Version::v9_instructions_work()) {
- Assembler::fmov(w, s, d);
- } else {
- if (w == FloatRegisterImpl::S) {
- Assembler::fmov(w, s, d);
- } else if (w == FloatRegisterImpl::D) {
- // number() does a sanity check on the alignment.
- assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
- ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
- Assembler::fmov(FloatRegisterImpl::S, s, d);
- Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
- } else {
- assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
- // number() does a sanity check on the alignment.
- assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
- ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
- Assembler::fmov(FloatRegisterImpl::S, s, d);
- Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
- Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
- Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
- }
- }
-}
-
-void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
- if (VM_Version::v9_instructions_work()) {
- Assembler::fabs(w, s, d);
- } else {
- if (w == FloatRegisterImpl::S) {
- Assembler::fabs(w, s, d);
- } else if (w == FloatRegisterImpl::D) {
- // number() does a sanity check on the alignment.
- assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
- ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
- Assembler::fabs(FloatRegisterImpl::S, s, d);
- Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
- } else {
- assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
- // number() does a sanity check on the alignment.
- assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
- ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
- Assembler::fabs(FloatRegisterImpl::S, s, d);
- Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
- Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
- Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
- }
- }
-}
-
-void MacroAssembler::save_all_globals_into_locals() {
- mov(G1,L1);
- mov(G2,L2);
- mov(G3,L3);
- mov(G4,L4);
- mov(G5,L5);
- mov(G6,L6);
- mov(G7,L7);
-}
-
-void MacroAssembler::restore_globals_from_locals() {
- mov(L1,G1);
- mov(L2,G2);
- mov(L3,G3);
- mov(L4,G4);
- mov(L5,G5);
- mov(L6,G6);
- mov(L7,G7);
-}
-
-// Use for 64 bit operation.
-void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
- // store ptr_reg as the new top value
-#ifdef _LP64
- casx(top_ptr_reg, top_reg, ptr_reg);
-#else
- cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
-#endif // _LP64
-}
-
-// [RGV] This routine does not handle 64 bit operations.
-// use casx_under_lock() or casx directly!!!
-void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
- // store ptr_reg as the new top value
- if (VM_Version::v9_instructions_work()) {
- cas(top_ptr_reg, top_reg, ptr_reg);
- } else {
-
- // If the register is not an out nor global, it is not visible
- // after the save. Allocate a register for it, save its
- // value in the register save area (the save may not flush
- // registers to the save area).
-
- Register top_ptr_reg_after_save;
- Register top_reg_after_save;
- Register ptr_reg_after_save;
-
- if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
- top_ptr_reg_after_save = top_ptr_reg->after_save();
- } else {
- Address reg_save_addr = top_ptr_reg->address_in_saved_window();
- top_ptr_reg_after_save = L0;
- st(top_ptr_reg, reg_save_addr);
- }
-
- if (top_reg->is_out() || top_reg->is_global()) {
- top_reg_after_save = top_reg->after_save();
- } else {
- Address reg_save_addr = top_reg->address_in_saved_window();
- top_reg_after_save = L1;
- st(top_reg, reg_save_addr);
- }
-
- if (ptr_reg->is_out() || ptr_reg->is_global()) {
- ptr_reg_after_save = ptr_reg->after_save();
- } else {
- Address reg_save_addr = ptr_reg->address_in_saved_window();
- ptr_reg_after_save = L2;
- st(ptr_reg, reg_save_addr);
- }
-
- const Register& lock_reg = L3;
- const Register& lock_ptr_reg = L4;
- const Register& value_reg = L5;
- const Register& yield_reg = L6;
- const Register& yieldall_reg = L7;
-
- save_frame();
-
- if (top_ptr_reg_after_save == L0) {
- ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
- }
-
- if (top_reg_after_save == L1) {
- ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
- }
-
- if (ptr_reg_after_save == L2) {
- ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
- }
-
- Label(retry_get_lock);
- Label(not_same);
- Label(dont_yield);
-
- assert(lock_addr, "lock_address should be non null for v8");
- set((intptr_t)lock_addr, lock_ptr_reg);
- // Initialize yield counter
- mov(G0,yield_reg);
- mov(G0, yieldall_reg);
- set(StubRoutines::Sparc::locked, lock_reg);
-
- bind(retry_get_lock);
- cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
-
- if(use_call_vm) {
- Untested("Need to verify global reg consistancy");
- call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
- } else {
- // Save the regs and make space for a C call
- save(SP, -96, SP);
- save_all_globals_into_locals();
- call(CAST_FROM_FN_PTR(address,os::yield_all));
- delayed()->mov(yieldall_reg, O0);
- restore_globals_from_locals();
- restore();
- }
-
- // reset the counter
- mov(G0,yield_reg);
- add(yieldall_reg, 1, yieldall_reg);
-
- bind(dont_yield);
- // try to get lock
- swap(lock_ptr_reg, 0, lock_reg);
-
- // did we get the lock?
- cmp(lock_reg, StubRoutines::Sparc::unlocked);
- br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
- delayed()->add(yield_reg,1,yield_reg);
-
- // yes, got lock. do we have the same top?
- ld(top_ptr_reg_after_save, 0, value_reg);
- cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
-
- // yes, same top.
- st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
- membar(Assembler::StoreStore);
-
- bind(not_same);
- mov(value_reg, ptr_reg_after_save);
- st(lock_reg, lock_ptr_reg, 0); // unlock
-
- restore();
- }
-}
-
-RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
- Register tmp,
- int offset) {
- intptr_t value = *delayed_value_addr;
- if (value != 0)
- return RegisterOrConstant(value + offset);
-
- // load indirectly to solve generation ordering problem
- AddressLiteral a(delayed_value_addr);
- load_ptr_contents(a, tmp);
-
-#ifdef ASSERT
- tst(tmp);
- breakpoint_trap(zero, xcc);
-#endif
-
- if (offset != 0)
- add(tmp, offset, tmp);
-
- return RegisterOrConstant(tmp);
-}
-
-
-RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
- assert(d.register_or_noreg() != G0, "lost side effect");
- if ((s2.is_constant() && s2.as_constant() == 0) ||
- (s2.is_register() && s2.as_register() == G0)) {
- // Do nothing, just move value.
- if (s1.is_register()) {
- if (d.is_constant()) d = temp;
- mov(s1.as_register(), d.as_register());
- return d;
- } else {
- return s1;
- }
- }
-
- if (s1.is_register()) {
- assert_different_registers(s1.as_register(), temp);
- if (d.is_constant()) d = temp;
- andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
- return d;
- } else {
- if (s2.is_register()) {
- assert_different_registers(s2.as_register(), temp);
- if (d.is_constant()) d = temp;
- set(s1.as_constant(), temp);
- andn(temp, s2.as_register(), d.as_register());
- return d;
- } else {
- intptr_t res = s1.as_constant() & ~s2.as_constant();
- return res;
- }
- }
-}
-
-RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
- assert(d.register_or_noreg() != G0, "lost side effect");
- if ((s2.is_constant() && s2.as_constant() == 0) ||
- (s2.is_register() && s2.as_register() == G0)) {
- // Do nothing, just move value.
- if (s1.is_register()) {
- if (d.is_constant()) d = temp;
- mov(s1.as_register(), d.as_register());
- return d;
- } else {
- return s1;
- }
- }
-
- if (s1.is_register()) {
- assert_different_registers(s1.as_register(), temp);
- if (d.is_constant()) d = temp;
- add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
- return d;
- } else {
- if (s2.is_register()) {
- assert_different_registers(s2.as_register(), temp);
- if (d.is_constant()) d = temp;
- add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
- return d;
- } else {
- intptr_t res = s1.as_constant() + s2.as_constant();
- return res;
- }
- }
-}
-
-RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
- assert(d.register_or_noreg() != G0, "lost side effect");
- if (!is_simm13(s2.constant_or_zero()))
- s2 = (s2.as_constant() & 0xFF);
- if ((s2.is_constant() && s2.as_constant() == 0) ||
- (s2.is_register() && s2.as_register() == G0)) {
- // Do nothing, just move value.
- if (s1.is_register()) {
- if (d.is_constant()) d = temp;
- mov(s1.as_register(), d.as_register());
- return d;
- } else {
- return s1;
- }
- }
-
- if (s1.is_register()) {
- assert_different_registers(s1.as_register(), temp);
- if (d.is_constant()) d = temp;
- sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
- return d;
- } else {
- if (s2.is_register()) {
- assert_different_registers(s2.as_register(), temp);
- if (d.is_constant()) d = temp;
- set(s1.as_constant(), temp);
- sll_ptr(temp, s2.as_register(), d.as_register());
- return d;
- } else {
- intptr_t res = s1.as_constant() << s2.as_constant();
- return res;
- }
- }
-}
-
-
-// Look up the method for a megamorphic invokeinterface call.
-// The target method is determined by <intf_klass, itable_index>.
-// The receiver klass is in recv_klass.
-// On success, the result will be in method_result, and execution falls through.
-// On failure, execution transfers to the given label.
-void MacroAssembler::lookup_interface_method(Register recv_klass,
- Register intf_klass,
- RegisterOrConstant itable_index,
- Register method_result,
- Register scan_temp,
- Register sethi_temp,
- Label& L_no_such_interface) {
- assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
- assert(itable_index.is_constant() || itable_index.as_register() == method_result,
- "caller must use same register for non-constant itable index as for method");
-
- Label L_no_such_interface_restore;
- bool did_save = false;
- if (scan_temp == noreg || sethi_temp == noreg) {
- Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
- Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
- assert(method_result->is_global(), "must be able to return value");
- scan_temp = L2;
- sethi_temp = L3;
- save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
- recv_klass = recv_2;
- intf_klass = intf_2;
- did_save = true;
- }
-
- // Compute start of first itableOffsetEntry (which is at the end of the vtable)
- int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
- int scan_step = itableOffsetEntry::size() * wordSize;
- int vte_size = vtableEntry::size() * wordSize;
-
- lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp);
- // %%% We should store the aligned, prescaled offset in the klassoop.
- // Then the next several instructions would fold away.
-
- int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
- int itb_offset = vtable_base;
- if (round_to_unit != 0) {
- // hoist first instruction of round_to(scan_temp, BytesPerLong):
- itb_offset += round_to_unit - wordSize;
- }
- int itb_scale = exact_log2(vtableEntry::size() * wordSize);
- sll(scan_temp, itb_scale, scan_temp);
- add(scan_temp, itb_offset, scan_temp);
- if (round_to_unit != 0) {
- // Round up to align_object_offset boundary
- // see code for InstanceKlass::start_of_itable!
- // Was: round_to(scan_temp, BytesPerLong);
- // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
- and3(scan_temp, -round_to_unit, scan_temp);
- }
- add(recv_klass, scan_temp, scan_temp);
-
- // Adjust recv_klass by scaled itable_index, so we can free itable_index.
- RegisterOrConstant itable_offset = itable_index;
- itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
- itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
- add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
-
- // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
- // if (scan->interface() == intf) {
- // result = (klass + scan->offset() + itable_index);
- // }
- // }
- Label L_search, L_found_method;
-
- for (int peel = 1; peel >= 0; peel--) {
- // %%%% Could load both offset and interface in one ldx, if they were
- // in the opposite order. This would save a load.
- ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
-
- // Check that this entry is non-null. A null entry means that
- // the receiver class doesn't implement the interface, and wasn't the
- // same as when the caller was compiled.
- bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
- delayed()->cmp(method_result, intf_klass);
-
- if (peel) {
- brx(Assembler::equal, false, Assembler::pt, L_found_method);
- } else {
- brx(Assembler::notEqual, false, Assembler::pn, L_search);
- // (invert the test to fall through to found_method...)
- }
- delayed()->add(scan_temp, scan_step, scan_temp);
-
- if (!peel) break;
-
- bind(L_search);
- }
-
- bind(L_found_method);
-
- // Got a hit.
- int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
- // scan_temp[-scan_step] points to the vtable offset we need
- ito_offset -= scan_step;
- lduw(scan_temp, ito_offset, scan_temp);
- ld_ptr(recv_klass, scan_temp, method_result);
-
- if (did_save) {
- Label L_done;
- ba(L_done);
- delayed()->restore();
-
- bind(L_no_such_interface_restore);
- ba(L_no_such_interface);
- delayed()->restore();
-
- bind(L_done);
- }
-}
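The pseudocode comment inside lookup_interface_method() compresses the itable walk; the self-contained C++ model below spells it out. The struct and parameter names here are illustrative stand-ins, not the VM's real itableOffsetEntry/itableMethodEntry types:

    // Scan itable offset records until a NULL interface (failure) or a match;
    // on a match, index the method table found at the recorded offset.
    struct ItableOffsetEntrySketch { const void* interface; int offset; };

    static const void* lookup_interface_sketch(const char* recv_klass_base,
                                               const ItableOffsetEntrySketch* itable,
                                               const void* intf,
                                               int itable_index,
                                               bool* no_such_interface) {
      for (const ItableOffsetEntrySketch* scan = itable; ; ++scan) {
        if (scan->interface == nullptr) {   // receiver does not implement intf
          *no_such_interface = true;
          return nullptr;
        }
        if (scan->interface == intf) {
          const void* const* methods =
              (const void* const*)(recv_klass_base + scan->offset);
          return methods[itable_index];     // the resolved interface method
        }
      }
    }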
-
-
-// virtual method calling
-void MacroAssembler::lookup_virtual_method(Register recv_klass,
- RegisterOrConstant vtable_index,
- Register method_result) {
- assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
- Register sethi_temp = method_result;
- const int base = (InstanceKlass::vtable_start_offset() * wordSize +
- // method pointer offset within the vtable entry:
- vtableEntry::method_offset_in_bytes());
- RegisterOrConstant vtable_offset = vtable_index;
- // Each of the following three lines potentially generates an instruction.
- // But the total number of address formation instructions will always be
- // at most two, and will often be zero. In any case, it will be optimal.
- // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
- // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
- vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
- vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
- Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
- ld_ptr(vtable_entry_addr, method_result);
-}
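The comment above counts address-formation instructions; the arithmetic itself is a scaled, offset load. A hypothetical, layout-parameterized C++ model of that address formation:

    // method = *(recv_klass + vtable_start + vtable_index * entry_size
    //            + method_offset_within_entry)
    static const void* lookup_virtual_sketch(const char* recv_klass,
                                             int vtable_index,
                                             int vtable_start_bytes,
                                             int entry_size_bytes,
                                             int method_offset_bytes) {
      const char* entry = recv_klass + vtable_start_bytes
                        + vtable_index * entry_size_bytes;
      return *(const void* const*)(entry + method_offset_bytes);
    }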
-
-
-void MacroAssembler::check_klass_subtype(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Label& L_success) {
- Register sub_2 = sub_klass;
- Register sup_2 = super_klass;
- if (!sub_2->is_global()) sub_2 = L0;
- if (!sup_2->is_global()) sup_2 = L1;
- bool did_save = false;
- if (temp_reg == noreg || temp2_reg == noreg) {
- temp_reg = L2;
- temp2_reg = L3;
- save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
- sub_klass = sub_2;
- super_klass = sup_2;
- did_save = true;
- }
- Label L_failure, L_pop_to_failure, L_pop_to_success;
- check_klass_subtype_fast_path(sub_klass, super_klass,
- temp_reg, temp2_reg,
- (did_save ? &L_pop_to_success : &L_success),
- (did_save ? &L_pop_to_failure : &L_failure), NULL);
-
- if (!did_save)
- save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
- check_klass_subtype_slow_path(sub_2, sup_2,
- L2, L3, L4, L5,
- NULL, &L_pop_to_failure);
-
- // on success:
- bind(L_pop_to_success);
- restore();
- ba_short(L_success);
-
- // on failure:
- bind(L_pop_to_failure);
- restore();
- bind(L_failure);
-}
-
-
-void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Label* L_success,
- Label* L_failure,
- Label* L_slow_path,
- RegisterOrConstant super_check_offset) {
- int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
- int sco_offset = in_bytes(Klass::super_check_offset_offset());
-
- bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
- bool need_slow_path = (must_load_sco ||
- super_check_offset.constant_or_zero() == sco_offset);
-
- assert_different_registers(sub_klass, super_klass, temp_reg);
- if (super_check_offset.is_register()) {
- assert_different_registers(sub_klass, super_klass, temp_reg,
- super_check_offset.as_register());
- } else if (must_load_sco) {
- assert(temp2_reg != noreg, "supply either a temp or a register offset");
- }
-
- Label L_fallthrough;
- int label_nulls = 0;
- if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
- if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
- if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
- assert(label_nulls <= 1 ||
- (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
- "at most one NULL in the batch, usually");
-
- // If the pointers are equal, we are done (e.g., String[] elements).
- // This self-check enables sharing of secondary supertype arrays among
- // non-primary types such as array-of-interface. Otherwise, each such
- // type would need its own customized SSA.
- // We move this check to the front of the fast path because many
- // type checks are in fact trivially successful in this manner,
- // so we get a nicely predicted branch right at the start of the check.
- cmp(super_klass, sub_klass);
- brx(Assembler::equal, false, Assembler::pn, *L_success);
- delayed()->nop();
-
- // Check the supertype display:
- if (must_load_sco) {
- // The super check offset is always positive...
- lduw(super_klass, sco_offset, temp2_reg);
- super_check_offset = RegisterOrConstant(temp2_reg);
- // super_check_offset is register.
- assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
- }
- ld_ptr(sub_klass, super_check_offset, temp_reg);
- cmp(super_klass, temp_reg);
-
- // This check has worked decisively for primary supers.
- // Secondary supers are sought in the super_cache ('super_cache_addr').
- // (Secondary supers are interfaces and very deeply nested subtypes.)
- // This works in the same check above because of a tricky aliasing
- // between the super_cache and the primary super display elements.
- // (The 'super_check_addr' can address either, as the case requires.)
- // Note that the cache is updated below if it does not help us find
- // what we need immediately.
- // So if it was a primary super, we can just fail immediately.
- // Otherwise, it's the slow path for us (no success at this point).
-
- // Hacked ba(), which may only be used just before L_fallthrough.
-#define FINAL_JUMP(label) \
- if (&(label) != &L_fallthrough) { \
- ba(label); delayed()->nop(); \
- }
-
- if (super_check_offset.is_register()) {
- brx(Assembler::equal, false, Assembler::pn, *L_success);
- delayed()->cmp(super_check_offset.as_register(), sc_offset);
-
- if (L_failure == &L_fallthrough) {
- brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
- delayed()->nop();
- } else {
- brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
- delayed()->nop();
- FINAL_JUMP(*L_slow_path);
- }
- } else if (super_check_offset.as_constant() == sc_offset) {
- // Need a slow path; fast failure is impossible.
- if (L_slow_path == &L_fallthrough) {
- brx(Assembler::equal, false, Assembler::pt, *L_success);
- delayed()->nop();
- } else {
- brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
- delayed()->nop();
- FINAL_JUMP(*L_success);
- }
- } else {
- // No slow path; it's a fast decision.
- if (L_failure == &L_fallthrough) {
- brx(Assembler::equal, false, Assembler::pt, *L_success);
- delayed()->nop();
- } else {
- brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
- delayed()->nop();
- FINAL_JUMP(*L_success);
- }
- }
-
- bind(L_fallthrough);
-
-#undef FINAL_JUMP
-}
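Stripped of branch scheduling, the fast path reduces to one load and two compares. A compact C++ model of that decision, with raw byte offsets standing in for the real Klass layout (illustrative only):

    enum SubtypeAnswer { IS_SUBTYPE, NOT_SUBTYPE, NEED_SLOW_PATH };

    // super_check_offset selects either a primary-super display slot or the
    // one-element secondary_super_cache inside the subclass's Klass.
    static SubtypeAnswer fast_path_sketch(const char* sub, const char* super,
                                          int super_check_offset, int sc_offset) {
      if (sub == super) return IS_SUBTYPE;               // trivial self check
      const char* probe = *(const char* const*)(sub + super_check_offset);
      if (probe == super) return IS_SUBTYPE;             // display or cache hit
      // A miss is decisive unless the probe went through the cache slot.
      return (super_check_offset == sc_offset) ? NEED_SLOW_PATH : NOT_SUBTYPE;
    }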
-
-
-void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
- Register super_klass,
- Register count_temp,
- Register scan_temp,
- Register scratch_reg,
- Register coop_reg,
- Label* L_success,
- Label* L_failure) {
- assert_different_registers(sub_klass, super_klass,
- count_temp, scan_temp, scratch_reg, coop_reg);
-
- Label L_fallthrough, L_loop;
- int label_nulls = 0;
- if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
- if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
- assert(label_nulls <= 1, "at most one NULL in the batch");
-
- // a couple of useful fields in sub_klass:
- int ss_offset = in_bytes(Klass::secondary_supers_offset());
- int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
-
- // Do a linear scan of the secondary super-klass chain.
- // This code is rarely used, so simplicity is a virtue here.
-
-#ifndef PRODUCT
- int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
- inc_counter((address) pst_counter, count_temp, scan_temp);
-#endif
-
- // We will consult the secondary-super array.
- ld_ptr(sub_klass, ss_offset, scan_temp);
-
- Register search_key = super_klass;
-
- // Load the array length. (Positive movl does the right thing on LP64.)
- lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);
-
- // Check for empty secondary super list
- tst(count_temp);
-
- // In the array of super classes elements are pointer sized.
- int element_size = wordSize;
-
- // Top of search loop
- bind(L_loop);
- br(Assembler::equal, false, Assembler::pn, *L_failure);
- delayed()->add(scan_temp, element_size, scan_temp);
-
- // Skip the array header in all array accesses.
- int elem_offset = Array<Klass*>::base_offset_in_bytes();
- elem_offset -= element_size; // the scan pointer was pre-incremented also
-
- // Load next super to check
- ld_ptr( scan_temp, elem_offset, scratch_reg );
-
- // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
- cmp(scratch_reg, search_key);
-
- // A miss means we are NOT a subtype and need to keep looping
- brx(Assembler::notEqual, false, Assembler::pn, L_loop);
- delayed()->deccc(count_temp); // decrement trip counter in delay slot
-
- // Success. Cache the super we found and proceed in triumph.
- st_ptr(super_klass, sub_klass, sc_offset);
-
- if (L_success != &L_fallthrough) {
- ba(*L_success);
- delayed()->nop();
- }
-
- bind(L_fallthrough);
-}
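The slow path is simply a linear scan plus a cache update; a short model with made-up field names (the real code walks an Array<Klass*> and stores through the secondary_super_cache offset):

    struct KlassSketch {
      const char** secondary_supers;        // secondary super-klass list
      int          secondary_count;
      const char*  secondary_super_cache;   // one-element cache probed above
    };

    static bool slow_path_sketch(KlassSketch* sub, const char* super) {
      for (int i = 0; i < sub->secondary_count; i++) {
        if (sub->secondary_supers[i] == super) {
          sub->secondary_super_cache = super;   // next fast-path probe will hit
          return true;
        }
      }
      return false;
    }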
-
-
-RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
- Register temp_reg,
- int extra_slot_offset) {
- // cf. TemplateTable::prepare_invoke(), if (load_receiver).
- int stackElementSize = Interpreter::stackElementSize;
- int offset = extra_slot_offset * stackElementSize;
- if (arg_slot.is_constant()) {
- offset += arg_slot.as_constant() * stackElementSize;
- return offset;
- } else {
- assert(temp_reg != noreg, "must specify");
- sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
- if (offset != 0)
- add(temp_reg, offset, temp_reg);
- return temp_reg;
- }
-}
-
-
-Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
- Register temp_reg,
- int extra_slot_offset) {
- return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
-}
-
-
-void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
- Register temp_reg,
- Label& done, Label* slow_case,
- BiasedLockingCounters* counters) {
- assert(UseBiasedLocking, "why call this otherwise?");
-
- if (PrintBiasedLockingStatistics) {
- assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
- if (counters == NULL)
- counters = BiasedLocking::counters();
- }
-
- Label cas_label;
-
- // Biased locking
- // See whether the lock is currently biased toward our thread and
- // whether the epoch is still valid
- // Note that the runtime guarantees sufficient alignment of JavaThread
- // pointers to allow age to be placed into low bits
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
-
- load_klass(obj_reg, temp_reg);
- ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
- or3(G2_thread, temp_reg, temp_reg);
- xor3(mark_reg, temp_reg, temp_reg);
- andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
- if (counters != NULL) {
- cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
- // Reload mark_reg as we may need it later
- ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
- }
- brx(Assembler::equal, true, Assembler::pt, done);
- delayed()->nop();
-
- Label try_revoke_bias;
- Label try_rebias;
- Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
- assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-
- // At this point we know that the header has the bias pattern and
- // that we are not the bias owner in the current epoch. We need to
- // figure out more details about the state of the header in order to
- // know what operations can be legally performed on the object's
- // header.
-
- // If the low three bits in the xor result aren't clear, that means
- // the prototype header is no longer biased and we have to revoke
- // the bias on this object.
- btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
- brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
-
- // Biasing is still enabled for this data type. See whether the
- // epoch of the current bias is still valid, meaning that the epoch
- // bits of the mark word are equal to the epoch bits of the
- // prototype header. (Note that the prototype header's epoch bits
- // only change at a safepoint.) If not, attempt to rebias the object
- // toward the current thread. Note that we must be absolutely sure
- // that the current epoch is invalid in order to do this because
- // otherwise the manipulations it performs on the mark word are
- // illegal.
- delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
- brx(Assembler::notZero, false, Assembler::pn, try_rebias);
-
- // The epoch of the current bias is still valid but we know nothing
- // about the owner; it might be set or it might be clear. Try to
- // acquire the bias of the object using an atomic operation. If this
- // fails we will go in to the runtime to revoke the object's bias.
- // Note that we first construct the presumed unbiased header so we
- // don't accidentally blow away another thread's valid bias.
- delayed()->and3(mark_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
- mark_reg);
- or3(G2_thread, mark_reg, temp_reg);
- casn(mark_addr.base(), mark_reg, temp_reg);
- // If the biasing toward our thread failed, this means that
- // another thread succeeded in biasing it toward itself and we
- // need to revoke that bias. The revocation will occur in the
- // interpreter runtime in the slow case.
- cmp(mark_reg, temp_reg);
- if (counters != NULL) {
- cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
- }
- if (slow_case != NULL) {
- brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
- delayed()->nop();
- }
- ba_short(done);
-
- bind(try_rebias);
- // At this point we know the epoch has expired, meaning that the
- // current "bias owner", if any, is actually invalid. Under these
- // circumstances _only_, we are allowed to use the current header's
- // value as the comparison value when doing the cas to acquire the
- // bias in the current epoch. In other words, we allow transfer of
- // the bias from one thread to another directly in this situation.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- load_klass(obj_reg, temp_reg);
- ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
- or3(G2_thread, temp_reg, temp_reg);
- casn(mark_addr.base(), mark_reg, temp_reg);
- // If the biasing toward our thread failed, this means that
- // another thread succeeded in biasing it toward itself and we
- // need to revoke that bias. The revocation will occur in the
- // interpreter runtime in the slow case.
- cmp(mark_reg, temp_reg);
- if (counters != NULL) {
- cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
- }
- if (slow_case != NULL) {
- brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
- delayed()->nop();
- }
- ba_short(done);
-
- bind(try_revoke_bias);
- // The prototype mark in the klass doesn't have the bias bit set any
- // more, indicating that objects of this data type are not supposed
- // to be biased any more. We are going to try to reset the mark of
- // this object to the prototype value and fall through to the
- // CAS-based locking scheme. Note that if our CAS fails, it means
- // that another thread raced us for the privilege of revoking the
- // bias of this particular object, so it's okay to continue in the
- // normal locking code.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- load_klass(obj_reg, temp_reg);
- ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
- casn(mark_addr.base(), mark_reg, temp_reg);
- // Fall through to the normal CAS-based lock, because no matter what
- // the result of the above CAS, some thread must have succeeded in
- // removing the bias bit from the object's header.
- if (counters != NULL) {
- cmp(mark_reg, temp_reg);
- cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
- }
-
- bind(cas_label);
-}
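The triage performed by biased_locking_enter() is easier to follow as straight-line C++. The sketch below models only the decision tree; the mask parameters stand in for the markOopDesc constants, and the actual CAS attempts are reduced to outcome labels:

    #include <cstdint>

    enum BiasedEnterOutcome {
      DONE, TRY_REVOKE_BIAS, TRY_REBIAS, TRY_ANON_BIAS_CAS, CAS_LABEL
    };

    static BiasedEnterOutcome biased_enter_sketch(intptr_t mark,
                                                  intptr_t prototype_header,
                                                  intptr_t thread,
                                                  intptr_t biased_lock_mask,
                                                  intptr_t biased_lock_pattern,
                                                  intptr_t age_mask,
                                                  intptr_t epoch_mask) {
      if ((mark & biased_lock_mask) != biased_lock_pattern)
        return CAS_LABEL;                             // not biased: normal locking
      intptr_t diff = (mark ^ (prototype_header | thread)) & ~age_mask;
      if (diff == 0)               return DONE;             // biased to us, epoch ok
      if (diff & biased_lock_mask) return TRY_REVOKE_BIAS;  // klass no longer biasable
      if (diff & epoch_mask)       return TRY_REBIAS;       // epoch expired
      return TRY_ANON_BIAS_CAS;    // anonymously biased: CAS our thread in
    }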
-
-void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
- bool allow_delay_slot_filling) {
- // Check for biased locking unlock case, which is a no-op
- // Note: we do not have to check the thread ID for two reasons.
- // First, the interpreter checks for IllegalMonitorStateException at
- // a higher level. Second, if the bias was revoked while we held the
- // lock, the object could not be rebiased toward another thread, so
- // the bias bit would be clear.
- ld_ptr(mark_addr, temp_reg);
- and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
- cmp(temp_reg, markOopDesc::biased_lock_pattern);
- brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
- delayed();
- if (!allow_delay_slot_filling) {
- nop();
- }
-}
-
-
-// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
-// Solaris/SPARC's "as". Another apt name would be cas_ptr()
-
-void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
- casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-}
-
-
-
-// compiler_lock_object() and compiler_unlock_object() are direct transliterations
-// of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
-// The code could be tightened up considerably.
-//
-// box->dhw disposition - post-conditions at DONE_LABEL.
-// - Successful inflated lock: box->dhw != 0.
-// Any non-zero value suffices.
-// Consider G2_thread, rsp, boxReg, or unused_mark()
-// - Successful Stack-lock: box->dhw == mark.
-// box->dhw must contain the displaced mark word value
-// - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
-// The slow-path fast_enter() and slow_enter() operators
-// are responsible for setting box->dhw = NonZero (typically ::unused_mark).
-// - Biased: box->dhw is undefined
-//
-// SPARC refworkload performance - specifically jetstream and scimark - is
-// extremely sensitive to the size of the code emitted by compiler_lock_object
-// and compiler_unlock_object. Critically, the key factor is code size, not path
-// length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
-// effect).
-
-
-void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
- Register Rbox, Register Rscratch,
- BiasedLockingCounters* counters,
- bool try_bias) {
- Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
-
- verify_oop(Roop);
- Label done ;
-
- if (counters != NULL) {
- inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
- }
-
- if (EmitSync & 1) {
- mov(3, Rscratch);
- st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
- cmp(SP, G0);
- return ;
- }
-
- if (EmitSync & 2) {
-
- // Fetch object's markword
- ld_ptr(mark_addr, Rmark);
-
- if (try_bias) {
- biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
- }
-
- // Save Rbox in Rscratch to be used for the cas operation
- mov(Rbox, Rscratch);
-
- // set Rmark to markOop | markOopDesc::unlocked_value
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
-
- // Initialize the box. (Must happen before we update the object mark!)
- st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
-
- // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
- assert(mark_addr.disp() == 0, "cas must take a zero displacement");
- casx_under_lock(mark_addr.base(), Rmark, Rscratch,
- (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-
- // if compare/exchange succeeded we found an unlocked object and we now have locked it
- // hence we are done
- cmp(Rmark, Rscratch);
-#ifdef _LP64
- sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
- brx(Assembler::equal, false, Assembler::pt, done);
- delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot
-
- // we did not find an unlocked object so see if this is a recursive case
- // sub(Rscratch, SP, Rscratch);
- assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- andcc(Rscratch, 0xfffff003, Rscratch);
- st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
- bind (done);
- return ;
- }
-
- Label Egress ;
-
- if (EmitSync & 256) {
- Label IsInflated ;
-
- ld_ptr(mark_addr, Rmark); // fetch obj->mark
- // Triage: biased, stack-locked, neutral, inflated
- if (try_bias) {
- biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
- // Invariant: if control reaches this point in the emitted stream
- // then Rmark has not been modified.
- }
-
- // Store mark into displaced mark field in the on-stack basic-lock "box"
- // Critically, this must happen before the CAS
- // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
- st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
- andcc(Rmark, 2, G0);
- brx(Assembler::notZero, false, Assembler::pn, IsInflated);
- delayed()->
-
- // Try stack-lock acquisition.
- // Beware: the 1st instruction is in a delay slot
- mov(Rbox, Rscratch);
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
- assert(mark_addr.disp() == 0, "cas must take a zero displacement");
- casn(mark_addr.base(), Rmark, Rscratch);
- cmp(Rmark, Rscratch);
- brx(Assembler::equal, false, Assembler::pt, done);
- delayed()->sub(Rscratch, SP, Rscratch);
-
- // Stack-lock attempt failed - check for recursive stack-lock.
- // See the comments below about how we might remove this case.
-#ifdef _LP64
- sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
- assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- andcc(Rscratch, 0xfffff003, Rscratch);
- br(Assembler::always, false, Assembler::pt, done);
- delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
-
- bind(IsInflated);
- if (EmitSync & 64) {
- // If m->owner != null goto IsLocked
- // Pessimistic form: Test-and-CAS vs CAS
- // The optimistic form avoids RTS->RTO cache line upgrades.
- ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
- andcc(Rscratch, Rscratch, G0);
- brx(Assembler::notZero, false, Assembler::pn, done);
- delayed()->nop();
- // m->owner == null : it's unlocked.
- }
-
- // Try to CAS m->owner from null to Self
- // Invariant: if we acquire the lock then _recursions should be 0.
- add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
- mov(G2_thread, Rscratch);
- casn(Rmark, G0, Rscratch);
- cmp(Rscratch, G0);
- // Intentional fall-through into done
- } else {
- // Aggressively avoid the Store-before-CAS penalty
- // Defer the store into box->dhw until after the CAS
- Label IsInflated, Recursive ;
-
-// Anticipate CAS -- Avoid RTS->RTO upgrade
-// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
-
- ld_ptr(mark_addr, Rmark); // fetch obj->mark
- // Triage: biased, stack-locked, neutral, inflated
-
- if (try_bias) {
- biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
- // Invariant: if control reaches this point in the emitted stream
- // then Rmark has not been modified.
- }
- andcc(Rmark, 2, G0);
- brx(Assembler::notZero, false, Assembler::pn, IsInflated);
- delayed()-> // Beware - dangling delay-slot
-
- // Try stack-lock acquisition.
- // Transiently install BUSY (0) encoding in the mark word.
- // if the CAS of 0 into the mark was successful then we execute:
- // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
- // ST obj->mark = box -- overwrite transient 0 value
- // This presumes TSO, of course.
-
- mov(0, Rscratch);
- or3(Rmark, markOopDesc::unlocked_value, Rmark);
- assert(mark_addr.disp() == 0, "cas must take a zero displacement");
- casn(mark_addr.base(), Rmark, Rscratch);
-// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
- cmp(Rscratch, Rmark);
- brx(Assembler::notZero, false, Assembler::pn, Recursive);
- delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
- if (counters != NULL) {
- cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
- }
- ba(done);
- delayed()->st_ptr(Rbox, mark_addr);
-
- bind(Recursive);
- // Stack-lock attempt failed - check for recursive stack-lock.
- // Tests show that we can remove the recursive case with no impact
- // on refworkload 0.83. If we need to reduce the size of the code
- // emitted by compiler_lock_object(), the recursive case is a perfect
- // candidate.
- //
- // A more extreme idea is to always inflate on stack-lock recursion.
- // This lets us eliminate the recursive checks in compiler_lock_object
- // and compiler_unlock_object and the (box->dhw == 0) encoding.
- // A brief experiment - requiring changes to synchronizer.cpp and the interpreter -
- // showed a performance *increase*. In the same experiment I eliminated
- // the fast-path stack-lock code from the interpreter and always passed
- // control to the "slow" operators in synchronizer.cpp.
-
- // RScratch contains the fetched obj->mark value from the failed CASN.
-#ifdef _LP64
- sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
- sub(Rscratch, SP, Rscratch);
- assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
- andcc(Rscratch, 0xfffff003, Rscratch);
- if (counters != NULL) {
- // Accounting needs the Rscratch register
- st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
- cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
- ba_short(done);
- } else {
- ba(done);
- delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
- }
-
- bind (IsInflated);
- if (EmitSync & 64) {
- // If m->owner != null goto IsLocked
- // Test-and-CAS vs CAS
- // Pessimistic form avoids futile (doomed) CAS attempts
- // The optimistic form avoids RTS->RTO cache line upgrades.
- ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
- andcc(Rscratch, Rscratch, G0);
- brx(Assembler::notZero, false, Assembler::pn, done);
- delayed()->nop();
- // m->owner == null : it's unlocked.
- }
-
- // Try to CAS m->owner from null to Self
- // Invariant: if we acquire the lock then _recursions should be 0.
- add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
- mov(G2_thread, Rscratch);
- casn(Rmark, G0, Rscratch);
- cmp(Rscratch, G0);
- // ST box->displaced_header = NonZero.
- // Any non-zero value suffices:
- // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
- st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
- // Intentional fall-through into done
- }
-
- bind (done);
-}
-
-void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
- Register Rbox, Register Rscratch,
- bool try_bias) {
- Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
-
- Label done ;
-
- if (EmitSync & 4) {
- cmp(SP, G0);
- return ;
- }
-
- if (EmitSync & 8) {
- if (try_bias) {
- biased_locking_exit(mark_addr, Rscratch, done);
- }
-
- // Test first if it is a fast recursive unlock
- ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
- br_null_short(Rmark, Assembler::pt, done);
-
- // Check if it is still a lightweight lock; this is true if we see
- // the stack address of the basicLock in the markOop of the object
- assert(mark_addr.disp() == 0, "cas must take a zero displacement");
- casx_under_lock(mark_addr.base(), Rbox, Rmark,
- (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
- ba(done);
- delayed()->cmp(Rbox, Rmark);
- bind(done);
- return ;
- }
-
- // Beware ... If the aggregate size of the code emitted by CLO and CUO
- // is too large, performance rolls abruptly off a cliff.
- // This could be related to inlining policies, code cache management, or
- // I$ effects.
- Label LStacked ;
-
- if (try_bias) {
- // TODO: eliminate redundant LDs of obj->mark
- biased_locking_exit(mark_addr, Rscratch, done);
- }
-
- ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
- ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
- andcc(Rscratch, Rscratch, G0);
- brx(Assembler::zero, false, Assembler::pn, done);
- delayed()->nop(); // consider: relocate fetch of mark, above, into this DS
- andcc(Rmark, 2, G0);
- brx(Assembler::zero, false, Assembler::pt, LStacked);
- delayed()->nop();
-
- // It's inflated
- // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
- // the ST of 0 into _owner which releases the lock. This prevents loads
- // and stores within the critical section from reordering (floating)
- // past the store that releases the lock. But TSO is a strong memory model
- // and that particular flavor of barrier is a noop, so we can safely elide it.
- // Note that we use 1-0 locking by default for the inflated case. We
- // close the resultant (and rare) race by having contending threads in
- // monitorenter periodically poll _owner.
- ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
- ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
- xor3(Rscratch, G2_thread, Rscratch);
- orcc(Rbox, Rscratch, Rbox);
- brx(Assembler::notZero, false, Assembler::pn, done);
- delayed()->
- ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
- ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
- orcc(Rbox, Rscratch, G0);
- if (EmitSync & 65536) {
- Label LSucc ;
- brx(Assembler::notZero, false, Assembler::pn, LSucc);
- delayed()->nop();
- ba(done);
- delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
-
- bind(LSucc);
- st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
- if (os::is_MP()) { membar (StoreLoad); }
- ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
- andcc(Rscratch, Rscratch, G0);
- brx(Assembler::notZero, false, Assembler::pt, done);
- delayed()->andcc(G0, G0, G0);
- add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
- mov(G2_thread, Rscratch);
- casn(Rmark, G0, Rscratch);
- // invert icc.zf and goto done
- br_notnull(Rscratch, false, Assembler::pt, done);
- delayed()->cmp(G0, G0);
- ba(done);
- delayed()->cmp(G0, 1);
- } else {
- brx(Assembler::notZero, false, Assembler::pn, done);
- delayed()->nop();
- ba(done);
- delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
- }
-
- bind (LStacked);
- // Consider: we could replace the expensive CAS in the exit
- // path with a simple ST of the displaced mark value fetched from
- // the on-stack basiclock box. That admits a race where a thread T2
- // in the slow lock path -- inflating with monitor M -- could race a
- // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
- // More precisely T1 in the stack-lock unlock path could "stomp" the
- // inflated mark value M installed by T2, resulting in an orphan
- // object monitor M and T2 becoming stranded. We can remedy that situation
- // by having T2 periodically poll the object's mark word using timed wait
- // operations. If T2 discovers that a stomp has occurred it vacates
- // the monitor M and wakes any other threads stranded on the now-orphan M.
- // In addition the monitor scavenger, which performs deflation,
- // would also need to check for orphan monitors and stranded threads.
- //
- // Finally, inflation is also used when T2 needs to assign a hashCode
- // to O and O is stack-locked by T1. The "stomp" race could cause
- // an assigned hashCode value to be lost. We can avoid that condition
- // and provide the necessary hashCode stability invariants by ensuring
- // that hashCode generation is idempotent between copying GCs.
- // For example we could compute the hashCode of an object O as
- // O's heap address XOR some high quality RNG value that is refreshed
- // at GC-time. The monitor scavenger would install the hashCode
- // found in any orphan monitors. Again, the mechanism admits a
- // lost-update "stomp" WAW race but detects and recovers as needed.
- //
- // A prototype implementation showed excellent results, although
- // the scavenger and timeout code was rather involved.
-
- casn(mark_addr.base(), Rbox, Rscratch);
- cmp(Rbox, Rscratch);
- // Intentional fall through into done ...
-
- bind(done);
-}
-
-
-
-void MacroAssembler::print_CPU_state() {
- // %%%%% need to implement this
-}
-
-void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
- // %%%%% need to implement this
-}
-
-void MacroAssembler::push_IU_state() {
- // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_IU_state() {
- // %%%%% need to implement this
-}
-
-
-void MacroAssembler::push_FPU_state() {
- // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_FPU_state() {
- // %%%%% need to implement this
-}
-
-
-void MacroAssembler::push_CPU_state() {
- // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_CPU_state() {
- // %%%%% need to implement this
-}
-
-
-
-void MacroAssembler::verify_tlab() {
-#ifdef ASSERT
- if (UseTLAB && VerifyOops) {
- Label next, next2, ok;
- Register t1 = L0;
- Register t2 = L1;
- Register t3 = L2;
-
- save_frame(0);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
- or3(t1, t2, t3);
- cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
- STOP("assert(top >= start)");
- should_not_reach_here();
-
- bind(next);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
- or3(t3, t2, t3);
- cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
- STOP("assert(top <= end)");
- should_not_reach_here();
-
- bind(next2);
- and3(t3, MinObjAlignmentInBytesMask, t3);
- cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
- STOP("assert(aligned)");
- should_not_reach_here();
-
- bind(ok);
- restore();
- }
-#endif
-}
-
-
-void MacroAssembler::eden_allocate(
- Register obj, // result: pointer to object after successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at compile time
- Register t1, // temp register
- Register t2, // temp register
- Label& slow_case // continuation point if fast allocation fails
-){
- // make sure arguments make sense
- assert_different_registers(obj, var_size_in_bytes, t1, t2);
- assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
- assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
-
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
- // No allocation in the shared eden.
- ba_short(slow_case);
- } else {
- // get eden boundaries
- // note: we need both top & top_addr!
- const Register top_addr = t1;
- const Register end = t2;
-
- CollectedHeap* ch = Universe::heap();
- set((intx)ch->top_addr(), top_addr);
- intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
- ld_ptr(top_addr, delta, end);
- ld_ptr(top_addr, 0, obj);
-
- // try to allocate
- Label retry;
- bind(retry);
-#ifdef ASSERT
- // make sure eden top is properly aligned
- {
- Label L;
- btst(MinObjAlignmentInBytesMask, obj);
- br(Assembler::zero, false, Assembler::pt, L);
- delayed()->nop();
- STOP("eden top is not properly aligned");
- bind(L);
- }
-#endif // ASSERT
- const Register free = end;
- sub(end, obj, free); // compute amount of free space
- if (var_size_in_bytes->is_valid()) {
- // size is unknown at compile time
- cmp(free, var_size_in_bytes);
- br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
- delayed()->add(obj, var_size_in_bytes, end);
- } else {
- // size is known at compile time
- cmp(free, con_size_in_bytes);
- br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
- delayed()->add(obj, con_size_in_bytes, end);
- }
- // Compare obj with the value at top_addr; if still equal, swap the value of
- // end with the value at top_addr. If not equal, read the value at top_addr
- // into end.
- casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
- // if someone beat us on the allocation, try again, otherwise continue
- cmp(obj, end);
- brx(Assembler::notEqual, false, Assembler::pn, retry);
- delayed()->mov(end, obj); // nop if successful since obj == end
-
-#ifdef ASSERT
- // make sure eden top is properly aligned
- {
- Label L;
- const Register top_addr = t1;
-
- set((intx)ch->top_addr(), top_addr);
- ld_ptr(top_addr, 0, top_addr);
- btst(MinObjAlignmentInBytesMask, top_addr);
- br(Assembler::zero, false, Assembler::pt, L);
- delayed()->nop();
- STOP("eden top is not properly aligned");
- bind(L);
- }
-#endif // ASSERT
- }
-}
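The retry loop in eden_allocate() is a classic lock-free bump-pointer allocation over the shared eden top. A self-contained C++ sketch using std::atomic in place of the emitted casx, with the slow case represented by returning nullptr:

    #include <atomic>
    #include <cstddef>

    static char* eden_allocate_sketch(std::atomic<char*>& top, char* end,
                                      size_t size_in_bytes) {
      char* obj = top.load();
      for (;;) {
        char* new_top = obj + size_in_bytes;
        if (new_top > end) return nullptr;      // not enough space: slow case
        // On failure obj is reloaded with the winner's value and we retry.
        if (top.compare_exchange_weak(obj, new_top)) return obj;
      }
    }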
-
-
-void MacroAssembler::tlab_allocate(
- Register obj, // result: pointer to object after successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at compile time
- Register t1, // temp register
- Label& slow_case // continuation point if fast allocation fails
-){
- // make sure arguments make sense
- assert_different_registers(obj, var_size_in_bytes, t1);
- assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
- assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
-
- const Register free = t1;
-
- verify_tlab();
-
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
-
- // calculate amount of free space
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
- sub(free, obj, free);
-
- Label done;
- if (var_size_in_bytes == noreg) {
- cmp(free, con_size_in_bytes);
- } else {
- cmp(free, var_size_in_bytes);
- }
- br(Assembler::less, false, Assembler::pn, slow_case);
- // calculate the new top pointer
- if (var_size_in_bytes == noreg) {
- delayed()->add(obj, con_size_in_bytes, free);
- } else {
- delayed()->add(obj, var_size_in_bytes, free);
- }
-
- bind(done);
-
-#ifdef ASSERT
- // make sure new free pointer is properly aligned
- {
- Label L;
- btst(MinObjAlignmentInBytesMask, free);
- br(Assembler::zero, false, Assembler::pt, L);
- delayed()->nop();
- STOP("updated TLAB free is not properly aligned");
- bind(L);
- }
-#endif // ASSERT
-
- // update the tlab top pointer
- st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
- verify_tlab();
-}
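
For comparison, the TLAB fast path above is roughly the following C++ (tlab top/end stand in for the per-thread fields loaded via G2_thread; no atomics are needed because a TLAB is private to its thread):

#include <cstddef>
#include <cstdint>

struct Tlab {
  uintptr_t top;   // next free byte in this thread's TLAB
  uintptr_t end;   // one past the last usable byte
};

// Returns the new object's address, or 0 if the caller must take the slow path.
uintptr_t tlab_allocate(Tlab& t, size_t size_in_bytes) {
  uintptr_t obj  = t.top;
  uintptr_t free = t.end - obj;          // amount of free space
  if (free < size_in_bytes) {
    return 0;                            // refill the TLAB or go to eden
  }
  t.top = obj + size_in_bytes;           // bump the top pointer
  return obj;
}
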
-
-
-void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
- Register top = O0;
- Register t1 = G1;
- Register t2 = G3;
- Register t3 = O1;
- assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
- Label do_refill, discard_tlab;
-
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
- // No allocation in the shared eden.
- ba_short(slow_case);
- }
-
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
-
- // calculate amount of free space
- sub(t1, top, t1);
- srl_ptr(t1, LogHeapWordSize, t1);
-
- // Retain tlab and allocate object in shared space if
- // the amount free in the tlab is too large to discard.
- cmp(t1, t2);
- brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
-
- // increment waste limit to prevent getting stuck on this slow path
- delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
- st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
- if (TLABStats) {
- // increment number of slow_allocations
- ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
- add(t2, 1, t2);
- stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
- }
- ba_short(try_eden);
-
- bind(discard_tlab);
- if (TLABStats) {
- // increment number of refills
- ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
- add(t2, 1, t2);
- stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
- // accumulate wastage
- ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
- add(t2, t1, t2);
- stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
- }
-
- // if tlab is currently allocated (top or end != null) then
- // fill [top, end + alignment_reserve) with array object
- br_null_short(top, Assembler::pn, do_refill);
-
- set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
- st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
- // set klass to intArrayKlass
- sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
- add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
- sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
- st(t1, top, arrayOopDesc::length_offset_in_bytes());
- set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
- ld_ptr(t2, 0, t2);
-  // store klass last.  concurrent GCs assume the klass length is valid if
-  // the klass field is not null.
- store_klass(t2, top);
- verify_oop(top);
-
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
- sub(top, t1, t1); // size of tlab's allocated portion
- incr_allocated_bytes(t1, t2, t3);
-
- // refill the tlab with an eden allocation
- bind(do_refill);
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
- sll_ptr(t1, LogHeapWordSize, t1);
- // allocate new tlab, address returned in top
- eden_allocate(top, t1, 0, t2, t3, slow_case);
-
- st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
- st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
-#ifdef ASSERT
- // check that tlab_size (t1) is still valid
- {
- Label ok;
- ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
- sll_ptr(t2, LogHeapWordSize, t2);
- cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
- STOP("assert(t1 == tlab_size)");
- should_not_reach_here();
-
- bind(ok);
- }
-#endif // ASSERT
- add(top, t1, top); // t1 is tlab_size
- sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
- st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
- verify_tlab();
- ba_short(retry);
-}
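
The branch structure above boils down to a keep-or-discard decision on the current TLAB. An outline in C++ (field and parameter names are illustrative, not the VM's):

#include <cstddef>
#include <cstdint>

struct TlabState {
  uintptr_t start, top, end;
  size_t    refill_waste_limit;   // in heap words
};

// Returns true if a fresh TLAB should be carved out of eden (caller retries
// allocation there), false if this one object should go directly to eden.
bool tlab_refill(TlabState& t, size_t heap_word_size, size_t waste_increment) {
  size_t free_words = (t.end - t.top) / heap_word_size;
  if (free_words > t.refill_waste_limit) {
    // Too much space left to throw away: keep the TLAB, bump the waste
    // limit so we do not keep taking this path, and use eden this time.
    t.refill_waste_limit += waste_increment;
    return false;
  }
  // Discard: plug [top, end + alignment reserve) with an int[] filler so the
  // heap stays parseable, then allocate a new TLAB from eden (eden_allocate).
  return true;
}
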
-
-void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
- Register t1, Register t2) {
- // Bump total bytes allocated by this thread
- assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
- assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
- // v8 support has gone the way of the dodo
- ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
- add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
- stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
-}
-
-Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
- switch (cond) {
- // Note some conditions are synonyms for others
- case Assembler::never: return Assembler::always;
- case Assembler::zero: return Assembler::notZero;
- case Assembler::lessEqual: return Assembler::greater;
- case Assembler::less: return Assembler::greaterEqual;
- case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
- case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
- case Assembler::negative: return Assembler::positive;
- case Assembler::overflowSet: return Assembler::overflowClear;
- case Assembler::always: return Assembler::never;
- case Assembler::notZero: return Assembler::zero;
- case Assembler::greater: return Assembler::lessEqual;
- case Assembler::greaterEqual: return Assembler::less;
- case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
- case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
- case Assembler::positive: return Assembler::negative;
- case Assembler::overflowClear: return Assembler::overflowSet;
- }
-
- ShouldNotReachHere(); return Assembler::overflowClear;
-}
-
-void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
- Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
- Condition negated_cond = negate_condition(cond);
- Label L;
- brx(negated_cond, false, Assembler::pt, L);
- delayed()->nop();
- inc_counter(counter_ptr, Rtmp1, Rtmp2);
- bind(L);
-}
-
-void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
- AddressLiteral addrlit(counter_addr);
- sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
- Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
- ld(addr, Rtmp2);
- inc(Rtmp2);
- st(Rtmp2, addr);
-}
-
-void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
- inc_counter((address) counter_addr, Rtmp1, Rtmp2);
-}
-
-SkipIfEqual::SkipIfEqual(
- MacroAssembler* masm, Register temp, const bool* flag_addr,
- Assembler::Condition condition) {
- _masm = masm;
- AddressLiteral flag(flag_addr);
- _masm->sethi(flag, temp);
- _masm->ldub(temp, flag.low10(), temp);
- _masm->tst(temp);
- _masm->br(condition, false, Assembler::pt, _label);
- _masm->delayed()->nop();
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
-}
-
-
-// Writes to successive stack pages until the given offset is reached, to check
-// for stack overflow + shadow pages.  This clobbers tsp and scratch.
-void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
- Register Rscratch) {
- // Use stack pointer in temp stack pointer
- mov(SP, Rtsp);
-
- // Bang stack for total size given plus stack shadow page size.
- // Bang one page at a time because a large size can overflow yellow and
- // red zones (the bang will fail but stack overflow handling can't tell that
- // it was a stack overflow bang vs a regular segv).
- int offset = os::vm_page_size();
- Register Roffset = Rscratch;
-
- Label loop;
- bind(loop);
- set((-offset)+STACK_BIAS, Rscratch);
- st(G0, Rtsp, Rscratch);
- set(offset, Roffset);
- sub(Rsize, Roffset, Rsize);
- cmp(Rsize, G0);
- br(Assembler::greater, false, Assembler::pn, loop);
- delayed()->sub(Rtsp, Roffset, Rtsp);
-
- // Bang down shadow pages too.
- // The -1 because we already subtracted 1 page.
- for (int i = 0; i< StackShadowPages-1; i++) {
- set((-i*offset)+STACK_BIAS, Rscratch);
- st(G0, Rtsp, Rscratch);
- }
-}
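
In effect the loop above does the following, shown as a C++ sketch (touching each page individually is what lets the guard pages fault one at a time; the parameters are illustrative and STACK_BIAS is ignored):

#include <cstddef>

void bang_stack_size(volatile char* sp, size_t size, size_t page_size,
                     size_t shadow_pages) {
  volatile char* tsp = sp;
  // Bang the requested size one page at a time, moving down the stack.
  for (size_t banged = 0; banged < size; banged += page_size) {
    tsp -= page_size;
    *tsp = 0;
  }
  // Bang the shadow pages as well; the "- 1" accounts for the page the
  // first loop already touched.
  for (size_t i = 0; i + 1 < shadow_pages; i++) {
    *(tsp - i * page_size) = 0;
  }
}
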
-
-///////////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
-
-static address satb_log_enqueue_with_frame = NULL;
-static u_char* satb_log_enqueue_with_frame_end = NULL;
-
-static address satb_log_enqueue_frameless = NULL;
-static u_char* satb_log_enqueue_frameless_end = NULL;
-
-static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
-
-static void generate_satb_log_enqueue(bool with_frame) {
- BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
- CodeBuffer buf(bb);
- MacroAssembler masm(&buf);
-
-#define __ masm.
-
- address start = __ pc();
- Register pre_val;
-
- Label refill, restart;
- if (with_frame) {
- __ save_frame(0);
- pre_val = I0; // Was O0 before the save.
- } else {
- pre_val = O0;
- }
-
- int satb_q_index_byte_offset =
- in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_index());
-
- int satb_q_buf_byte_offset =
- in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_buf());
-
- assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
- in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
- "check sizes in assembly below");
-
- __ bind(restart);
-
- // Load the index into the SATB buffer. PtrQueue::_index is a size_t
- // so ld_ptr is appropriate.
- __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
-
- // index == 0?
- __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
-
- __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
- __ sub(L0, oopSize, L0);
-
-  __ st_ptr(pre_val, L1, L0);  // [_buf + index] := pre_val
- if (!with_frame) {
- // Use return-from-leaf
- __ retl();
- __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
- } else {
- // Not delayed.
- __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
- }
- if (with_frame) {
- __ ret();
- __ delayed()->restore();
- }
- __ bind(refill);
-
- address handle_zero =
- CAST_FROM_FN_PTR(address,
- &SATBMarkQueueSet::handle_zero_index_for_thread);
- // This should be rare enough that we can afford to save all the
- // scratch registers that the calling context might be using.
- __ mov(G1_scratch, L0);
- __ mov(G3_scratch, L1);
- __ mov(G4, L2);
- // We need the value of O0 above (for the write into the buffer), so we
- // save and restore it.
- __ mov(O0, L3);
- // Since the call will overwrite O7, we save and restore that, as well.
- __ mov(O7, L4);
- __ call_VM_leaf(L5, handle_zero, G2_thread);
- __ mov(L0, G1_scratch);
- __ mov(L1, G3_scratch);
- __ mov(L2, G4);
- __ mov(L3, O0);
- __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
- __ delayed()->mov(L4, O7);
-
- if (with_frame) {
- satb_log_enqueue_with_frame = start;
- satb_log_enqueue_with_frame_end = __ pc();
- } else {
- satb_log_enqueue_frameless = start;
- satb_log_enqueue_frameless_end = __ pc();
- }
-
-#undef __
-}
-
-static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
- if (with_frame) {
- if (satb_log_enqueue_with_frame == 0) {
- generate_satb_log_enqueue(with_frame);
- assert(satb_log_enqueue_with_frame != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated with-frame satb enqueue:");
- Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
- satb_log_enqueue_with_frame_end,
- tty);
- }
- }
- } else {
- if (satb_log_enqueue_frameless == 0) {
- generate_satb_log_enqueue(with_frame);
- assert(satb_log_enqueue_frameless != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated frameless satb enqueue:");
- Disassembler::decode((u_char*)satb_log_enqueue_frameless,
- satb_log_enqueue_frameless_end,
- tty);
- }
- }
- }
-}
-
-void MacroAssembler::g1_write_barrier_pre(Register obj,
- Register index,
- int offset,
- Register pre_val,
- Register tmp,
- bool preserve_o_regs) {
- Label filtered;
-
- if (obj == noreg) {
- // We are not loading the previous value so make
- // sure that we don't trash the value in pre_val
- // with the code below.
- assert_different_registers(pre_val, tmp);
- } else {
- // We will be loading the previous value
- // in this code so...
- assert(offset == 0 || index == noreg, "choose one");
- assert(pre_val == noreg, "check this code");
- }
-
- // Is marking active?
- if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
- ld(G2,
- in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_active()),
- tmp);
- } else {
- guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
- "Assumption");
- ldsb(G2,
- in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_active()),
- tmp);
- }
-
- // Is marking active?
- cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
-
- // Do we need to load the previous value?
- if (obj != noreg) {
- // Load the previous value...
- if (index == noreg) {
- if (Assembler::is_simm13(offset)) {
- load_heap_oop(obj, offset, tmp);
- } else {
- set(offset, tmp);
- load_heap_oop(obj, tmp, tmp);
- }
- } else {
- load_heap_oop(obj, index, tmp);
- }
- // Previous value has been loaded into tmp
- pre_val = tmp;
- }
-
- assert(pre_val != noreg, "must have a real register");
-
- // Is the previous value null?
- cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);
-
- // OK, it's not filtered, so we'll need to call enqueue. In the normal
- // case, pre_val will be a scratch G-reg, but there are some cases in
-  // which it's an O-reg.  In the former case, do a normal call.  In the
- // latter, do a save here and call the frameless version.
-
- guarantee(pre_val->is_global() || pre_val->is_out(),
- "Or we need to think harder.");
-
- if (pre_val->is_global() && !preserve_o_regs) {
- generate_satb_log_enqueue_if_necessary(true); // with frame
-
- call(satb_log_enqueue_with_frame);
- delayed()->mov(pre_val, O0);
- } else {
- generate_satb_log_enqueue_if_necessary(false); // frameless
-
- save_frame(0);
- call(satb_log_enqueue_frameless);
- delayed()->mov(pre_val->after_save(), O0);
- restore();
- }
-
- bind(filtered);
-}
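
Stripped of the register shuffling, the pre-barrier above is the usual SATB filtering followed by an enqueue. A sketch in C++ (the queue layout mirrors PtrQueue's active/index/buf fields; names are illustrative, and the byte_width_of_active() handling collapses to a plain boolean test):

#include <cstddef>
#include <cstdint>

struct SatbQueue {
  uint8_t active;   // is concurrent marking active for this thread?
  size_t  index;    // byte offset of the next free slot; 0 means full
  void**  buf;      // enqueue buffer
};

void g1_pre_barrier(SatbQueue& q, void* pre_val) {
  if (!q.active)          return;   // marking not active: filtered
  if (pre_val == nullptr) return;   // previous value is null: filtered
  if (q.index == 0) {
    // Buffer full: the stub calls
    // SATBMarkQueueSet::handle_zero_index_for_thread() and then retries;
    // the retry is omitted in this sketch.
    return;
  }
  q.index -= sizeof(void*);
  q.buf[q.index / sizeof(void*)] = pre_val;   // [_buf + index] := pre_val
}
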
-
-static address dirty_card_log_enqueue = 0;
-static u_char* dirty_card_log_enqueue_end = 0;
-
-// This code assumes that O0 contains the object address.
-static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
- BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
- CodeBuffer buf(bb);
- MacroAssembler masm(&buf);
-#define __ masm.
- address start = __ pc();
-
- Label not_already_dirty, restart, refill;
-
-#ifdef _LP64
- __ srlx(O0, CardTableModRefBS::card_shift, O0);
-#else
- __ srl(O0, CardTableModRefBS::card_shift, O0);
-#endif
- AddressLiteral addrlit(byte_map_base);
- __ set(addrlit, O1); // O1 := <card table base>
- __ ldub(O0, O1, O2); // O2 := [O0 + O1]
-
- assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
- __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
-
- // We didn't take the branch, so we're already dirty: return.
- // Use return-from-leaf
- __ retl();
- __ delayed()->nop();
-
- // Not dirty.
- __ bind(not_already_dirty);
-
- // Get O0 + O1 into a reg by itself
- __ add(O0, O1, O3);
-
- // First, dirty it.
- __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
-
- int dirty_card_q_index_byte_offset =
- in_bytes(JavaThread::dirty_card_queue_offset() +
- PtrQueue::byte_offset_of_index());
- int dirty_card_q_buf_byte_offset =
- in_bytes(JavaThread::dirty_card_queue_offset() +
- PtrQueue::byte_offset_of_buf());
- __ bind(restart);
-
- // Load the index into the update buffer. PtrQueue::_index is
- // a size_t so ld_ptr is appropriate here.
- __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
-
- // index == 0?
- __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
-
- __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
- __ sub(L0, oopSize, L0);
-
-  __ st_ptr(O3, L1, L0);  // [_buf + index] := O3 (the card address)
- // Use return-from-leaf
- __ retl();
- __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
-
- __ bind(refill);
- address handle_zero =
- CAST_FROM_FN_PTR(address,
- &DirtyCardQueueSet::handle_zero_index_for_thread);
- // This should be rare enough that we can afford to save all the
- // scratch registers that the calling context might be using.
- __ mov(G1_scratch, L3);
- __ mov(G3_scratch, L5);
- // We need the value of O3 above (for the write into the buffer), so we
- // save and restore it.
- __ mov(O3, L6);
- // Since the call will overwrite O7, we save and restore that, as well.
- __ mov(O7, L4);
-
- __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
- __ mov(L3, G1_scratch);
- __ mov(L5, G3_scratch);
- __ mov(L6, O3);
- __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
- __ delayed()->mov(L4, O7);
-
- dirty_card_log_enqueue = start;
- dirty_card_log_enqueue_end = __ pc();
- // XXX Should have a guarantee here about not going off the end!
- // Does it already do so? Do an experiment...
-
-#undef __
-
-}
-
-static inline void
-generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
- if (dirty_card_log_enqueue == 0) {
- generate_dirty_card_log_enqueue(byte_map_base);
- assert(dirty_card_log_enqueue != 0, "postcondition.");
- if (G1SATBPrintStubs) {
- tty->print_cr("Generated dirty_card enqueue:");
- Disassembler::decode((u_char*)dirty_card_log_enqueue,
- dirty_card_log_enqueue_end,
- tty);
- }
- }
-}
-
-
-void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
-
- Label filtered;
- MacroAssembler* post_filter_masm = this;
-
- if (new_val == G0) return;
-
- G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::G1SATBCT ||
- bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
-
- if (G1RSBarrierRegionFilter) {
- xor3(store_addr, new_val, tmp);
-#ifdef _LP64
- srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
-#else
- srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
-#endif
-
- // XXX Should I predict this taken or not? Does it matter?
- cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
- }
-
- // If the "store_addr" register is an "in" or "local" register, move it to
- // a scratch reg so we can pass it as an argument.
- bool use_scr = !(store_addr->is_global() || store_addr->is_out());
- // Pick a scratch register different from "tmp".
- Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
- // Make sure we use up the delay slot!
- if (use_scr) {
- post_filter_masm->mov(store_addr, scr);
- } else {
- post_filter_masm->nop();
- }
- generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
- save_frame(0);
- call(dirty_card_log_enqueue);
- if (use_scr) {
- delayed()->mov(scr, O0);
- } else {
- delayed()->mov(store_addr->after_save(), O0);
- }
- restore();
-
- bind(filtered);
-}
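
Taken together with the dirty-card stub above, the post-barrier reduces to the following shape (kLogRegionBytes and kCardShift are illustrative stand-ins for HeapRegion::LogOfHRGrainBytes and CardTableModRefBS::card_shift):

#include <cstdint>

const int kLogRegionBytes = 20;   // illustrative region size (1 MB)
const int kCardShift      = 9;    // illustrative card size (512 bytes)

void g1_post_barrier(uint8_t* byte_map_base, uintptr_t store_addr,
                     uintptr_t new_val) {
  if (new_val == 0) return;                              // storing NULL
  if (((store_addr ^ new_val) >> kLogRegionBytes) == 0)
    return;                                              // same region: filtered
  uint8_t* card = byte_map_base + (store_addr >> kCardShift);
  if (*card == 0) return;                                // already dirty
  *card = 0;                                             // dirty_card_val() == 0
  // ...then push `card` onto the thread's dirty card queue, refilling via
  // DirtyCardQueueSet::handle_zero_index_for_thread() when the buffer fills.
}
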
-
-#endif // SERIALGC
-///////////////////////////////////////////////////////////////////////////////////
-
-void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
- // If we're writing constant NULL, we can skip the write barrier.
- if (new_val == G0) return;
- CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef ||
- bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
- card_table_write(bs->byte_map_base, tmp, store_addr);
-}
-
-void MacroAssembler::load_klass(Register src_oop, Register klass) {
- // The number of bytes in this code is used by
-  // MachCallDynamicJavaNode::ret_addr_offset();
- // if this changes, change that.
- if (UseCompressedKlassPointers) {
- lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
- decode_klass_not_null(klass);
- } else {
- ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
- }
-}
-
-void MacroAssembler::store_klass(Register klass, Register dst_oop) {
- if (UseCompressedKlassPointers) {
- assert(dst_oop != klass, "not enough registers");
- encode_klass_not_null(klass);
- st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
- } else {
- st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
- }
-}
-
-void MacroAssembler::store_klass_gap(Register s, Register d) {
- if (UseCompressedKlassPointers) {
- assert(s != d, "not enough registers");
- st(s, d, oopDesc::klass_gap_offset_in_bytes());
- }
-}
-
-void MacroAssembler::load_heap_oop(const Address& s, Register d) {
- if (UseCompressedOops) {
- lduw(s, d);
- decode_heap_oop(d);
- } else {
- ld_ptr(s, d);
- }
-}
-
-void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
- if (UseCompressedOops) {
- lduw(s1, s2, d);
- decode_heap_oop(d, d);
- } else {
- ld_ptr(s1, s2, d);
- }
-}
-
-void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
- if (UseCompressedOops) {
- lduw(s1, simm13a, d);
- decode_heap_oop(d, d);
- } else {
- ld_ptr(s1, simm13a, d);
- }
-}
-
-void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
- if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
- else load_heap_oop(s1, s2.as_register(), d);
-}
-
-void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
- if (UseCompressedOops) {
- assert(s1 != d && s2 != d, "not enough registers");
- encode_heap_oop(d);
- st(d, s1, s2);
- } else {
- st_ptr(d, s1, s2);
- }
-}
-
-void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
- if (UseCompressedOops) {
- assert(s1 != d, "not enough registers");
- encode_heap_oop(d);
- st(d, s1, simm13a);
- } else {
- st_ptr(d, s1, simm13a);
- }
-}
-
-void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
- if (UseCompressedOops) {
- assert(a.base() != d, "not enough registers");
- encode_heap_oop(d);
- st(d, a, offset);
- } else {
- st_ptr(d, a, offset);
- }
-}
-
-
-void MacroAssembler::encode_heap_oop(Register src, Register dst) {
- assert (UseCompressedOops, "must be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- verify_oop(src);
- if (Universe::narrow_oop_base() == NULL) {
- srlx(src, LogMinObjAlignmentInBytes, dst);
- return;
- }
- Label done;
- if (src == dst) {
- // optimize for frequent case src == dst
- bpr(rc_nz, true, Assembler::pt, src, done);
-    delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
- bind(done);
- srlx(src, LogMinObjAlignmentInBytes, dst);
- } else {
- bpr(rc_z, false, Assembler::pn, src, done);
- delayed() -> mov(G0, dst);
-    // could be moved before the branch, annulling the delay slot,
-    // but may add some unneeded work when decoding null
- sub(src, G6_heapbase, dst);
- srlx(dst, LogMinObjAlignmentInBytes, dst);
- bind(done);
- }
-}
-
-
-void MacroAssembler::encode_heap_oop_not_null(Register r) {
- assert (UseCompressedOops, "must be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- verify_oop(r);
- if (Universe::narrow_oop_base() != NULL)
- sub(r, G6_heapbase, r);
- srlx(r, LogMinObjAlignmentInBytes, r);
-}
-
-void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
- assert (UseCompressedOops, "must be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- verify_oop(src);
- if (Universe::narrow_oop_base() == NULL) {
- srlx(src, LogMinObjAlignmentInBytes, dst);
- } else {
- sub(src, G6_heapbase, dst);
- srlx(dst, LogMinObjAlignmentInBytes, dst);
- }
-}
-
-// Same algorithm as oops.inline.hpp decode_heap_oop.
-void MacroAssembler::decode_heap_oop(Register src, Register dst) {
- assert (UseCompressedOops, "must be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- sllx(src, LogMinObjAlignmentInBytes, dst);
- if (Universe::narrow_oop_base() != NULL) {
- Label done;
- bpr(rc_nz, true, Assembler::pt, dst, done);
-    delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
- bind(done);
- }
- verify_oop(dst);
-}
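
The encode/decode pair above is the standard compressed-oop arithmetic, narrow = (oop - base) >> shift, with care that NULL stays NULL. A minimal sketch (base/shift stand in for Universe::narrow_oop_base()/narrow_oop_shift()):

#include <cstdint>

uint32_t encode_heap_oop(uintptr_t oop, uintptr_t base, int shift) {
  if (base == 0) return (uint32_t)(oop >> shift);   // zero-based mode
  if (oop == 0)  return 0;                          // NULL encodes to 0
  return (uint32_t)((oop - base) >> shift);
}

uintptr_t decode_heap_oop(uint32_t narrow, uintptr_t base, int shift) {
  uintptr_t oop = (uintptr_t)narrow << shift;       // sllx
  if (base != 0 && oop != 0) oop += base;           // add heapbase unless NULL
  return oop;
}
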
-
-void MacroAssembler::decode_heap_oop_not_null(Register r) {
- // Do not add assert code to this unless you change vtableStubs_sparc.cpp
- // pd_code_size_limit.
- // Also do not verify_oop as this is called by verify_oop.
- assert (UseCompressedOops, "must be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- sllx(r, LogMinObjAlignmentInBytes, r);
- if (Universe::narrow_oop_base() != NULL)
- add(r, G6_heapbase, r);
-}
-
-void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
- // Do not add assert code to this unless you change vtableStubs_sparc.cpp
- // pd_code_size_limit.
- // Also do not verify_oop as this is called by verify_oop.
- assert (UseCompressedOops, "must be compressed");
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- sllx(src, LogMinObjAlignmentInBytes, dst);
- if (Universe::narrow_oop_base() != NULL)
- add(dst, G6_heapbase, dst);
-}
-
-void MacroAssembler::encode_klass_not_null(Register r) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
- assert (UseCompressedKlassPointers, "must be compressed");
- assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- if (Universe::narrow_klass_base() != NULL)
- sub(r, G6_heapbase, r);
- srlx(r, LogKlassAlignmentInBytes, r);
-}
-
-void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
- assert (UseCompressedKlassPointers, "must be compressed");
- assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- if (Universe::narrow_klass_base() == NULL) {
- srlx(src, LogKlassAlignmentInBytes, dst);
- } else {
- sub(src, G6_heapbase, dst);
- srlx(dst, LogKlassAlignmentInBytes, dst);
- }
-}
-
-void MacroAssembler::decode_klass_not_null(Register r) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
- // Do not add assert code to this unless you change vtableStubs_sparc.cpp
- // pd_code_size_limit.
- assert (UseCompressedKlassPointers, "must be compressed");
- assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- sllx(r, LogKlassAlignmentInBytes, r);
- if (Universe::narrow_klass_base() != NULL)
- add(r, G6_heapbase, r);
-}
-
-void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
- // Do not add assert code to this unless you change vtableStubs_sparc.cpp
- // pd_code_size_limit.
- assert (UseCompressedKlassPointers, "must be compressed");
- assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- sllx(src, LogKlassAlignmentInBytes, dst);
- if (Universe::narrow_klass_base() != NULL)
- add(dst, G6_heapbase, dst);
-}
-
-void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops || UseCompressedKlassPointers) {
- AddressLiteral base(Universe::narrow_ptrs_base_addr());
- load_ptr_contents(base, G6_heapbase);
- }
-}
-
-// Compare char[] arrays aligned to 4 bytes.
-void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
- Register limit, Register result,
- Register chr1, Register chr2, Label& Ldone) {
- Label Lvector, Lloop;
- assert(chr1 == result, "should be the same");
-
- // Note: limit contains number of bytes (2*char_elements) != 0.
- andcc(limit, 0x2, chr1); // trailing character ?
- br(Assembler::zero, false, Assembler::pt, Lvector);
- delayed()->nop();
-
- // compare the trailing char
- sub(limit, sizeof(jchar), limit);
- lduh(ary1, limit, chr1);
- lduh(ary2, limit, chr2);
- cmp(chr1, chr2);
- br(Assembler::notEqual, true, Assembler::pt, Ldone);
- delayed()->mov(G0, result); // not equal
-
- // only one char ?
- cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
- delayed()->add(G0, 1, result); // zero-length arrays are equal
-
-  // word-by-word compare, don't need alignment check
- bind(Lvector);
- // Shift ary1 and ary2 to the end of the arrays, negate limit
- add(ary1, limit, ary1);
- add(ary2, limit, ary2);
- neg(limit, limit);
-
- lduw(ary1, limit, chr1);
- bind(Lloop);
- lduw(ary2, limit, chr2);
- cmp(chr1, chr2);
- br(Assembler::notEqual, true, Assembler::pt, Ldone);
- delayed()->mov(G0, result); // not equal
- inccc(limit, 2*sizeof(jchar));
- // annul LDUW if branch is not taken to prevent access past end of array
- br(Assembler::notZero, true, Assembler::pt, Lloop);
- delayed()->lduw(ary1, limit, chr1); // hoisted
-
- // Caller should set it:
- // add(G0, 1, result); // equals
-}
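
The loop structure above corresponds to the following C++ (limit is the length in bytes; the 32-bit loads compare two jchars at a time, as in the lduw path):

#include <cstddef>
#include <cstdint>
#include <cstring>

typedef uint16_t jchar;

bool char_arrays_equals(const jchar* a, const jchar* b, size_t byte_count) {
  if (byte_count & 2) {                        // odd trailing character?
    byte_count -= sizeof(jchar);
    if (a[byte_count / 2] != b[byte_count / 2]) return false;
    if (byte_count == 0) return true;          // zero-length remainder
  }
  for (size_t i = 0; i < byte_count; i += 4) { // word-by-word compare
    uint32_t wa, wb;
    std::memcpy(&wa, reinterpret_cast<const char*>(a) + i, sizeof wa);
    std::memcpy(&wb, reinterpret_cast<const char*>(b) + i, sizeof wb);
    if (wa != wb) return false;
  }
  return true;
}
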
-
-// Use BIS for zeroing (count is in bytes).
-void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
- assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
- Register end = count;
- int cache_line_size = VM_Version::prefetch_data_size();
-  // Minimum count for which BIS zeroing is used, since
-  // it needs a membar, which is expensive.
- int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
-
- Label small_loop;
- // Check if count is negative (dead code) or zero.
-  // Note: count is 64-bit in a 64-bit VM.
- cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
-
- // Use BIS zeroing only for big arrays since it requires membar.
- if (Assembler::is_simm13(block_zero_size)) { // < 4096
- cmp(count, block_zero_size);
- } else {
- set(block_zero_size, temp);
- cmp(count, temp);
- }
- br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
- delayed()->add(to, count, end);
-
- // Note: size is >= three (32 bytes) cache lines.
-
- // Clean the beginning of space up to next cache line.
- for (int offs = 0; offs < cache_line_size; offs += 8) {
- stx(G0, to, offs);
- }
-
- // align to next cache line
- add(to, cache_line_size, to);
- and3(to, -cache_line_size, to);
-
- // Note: size left >= two (32 bytes) cache lines.
-
-  // BIS should not be used to zero the tail (64 bytes)
-  // to avoid zeroing the header of the following object.
- sub(end, (cache_line_size*2)-8, end);
-
- Label bis_loop;
- bind(bis_loop);
- stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
- add(to, cache_line_size, to);
- cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
-
- // BIS needs membar.
- membar(Assembler::StoreLoad);
-
- add(end, (cache_line_size*2)-8, end); // restore end
- cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
-
- // Clean the tail.
- bind(small_loop);
- stx(G0, to, 0);
- add(to, 8, to);
- cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
- nop(); // Separate short branches
-}
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,554 +25,13 @@
#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
-class BiasedLockingCounters;
-
-// <sys/trap.h> promises that the system will not use traps 16-31
-#define ST_RESERVED_FOR_USER_0 0x10
-
-/* Written: David Ungar 4/19/97 */
-
-// Contains all the definitions needed for sparc assembly code generation.
-
-// Register aliases for parts of the system:
-
-// 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
-// across context switches in V8+ ABI. Of course, there are no 64 bit regs
-// in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
-
-// g2-g4 are scratch registers called "application globals". Their
-// meaning is reserved to the "compilation system"--which means us!
-// They are not supposed to be touched by ordinary C code, although
-// highly-optimized C code might steal them for temps. They are safe
-// across thread switches, and the ABI requires that they be safe
-// across function calls.
-//
-// g1 and g3 are touched by more modules. V8 allows g1 to be clobbered
-// across func calls, and V8+ also allows g5 to be clobbered across
-// func calls. Also, g1 and g5 can get touched while doing shared
-// library loading.
-//
-// We must not touch g7 (it is the thread-self register) and g6 is
-// reserved for certain tools. g0, of course, is always zero.
-//
-// (Sources: SunSoft Compilers Group, thread library engineers.)
-
-// %%%% The interpreter should be revisited to reduce global scratch regs.
-
-// This global always holds the current JavaThread pointer:
-
-REGISTER_DECLARATION(Register, G2_thread , G2);
-REGISTER_DECLARATION(Register, G6_heapbase , G6);
-
-// The following globals are part of the Java calling convention:
-
-REGISTER_DECLARATION(Register, G5_method , G5);
-REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
-REGISTER_DECLARATION(Register, G5_inline_cache_reg , G5_method);
-
-// The following globals are used for the new C1 & interpreter calling convention:
-REGISTER_DECLARATION(Register, Gargs , G4); // pointing to the last argument
-
-// This local is used to preserve G2_thread in the interpreter and in stubs:
-REGISTER_DECLARATION(Register, L7_thread_cache , L7);
-
-// These globals are used as scratch registers in the interpreter:
-
-REGISTER_DECLARATION(Register, Gframe_size , G1); // SAME REG as G1_scratch
-REGISTER_DECLARATION(Register, G1_scratch , G1); // also SAME
-REGISTER_DECLARATION(Register, G3_scratch , G3);
-REGISTER_DECLARATION(Register, G4_scratch , G4);
-
-// These globals are used as short-lived scratch registers in the compiler:
-
-REGISTER_DECLARATION(Register, Gtemp , G5);
-
-// JSR 292 fixed register usages:
-REGISTER_DECLARATION(Register, G5_method_type , G5);
-REGISTER_DECLARATION(Register, G3_method_handle , G3);
-REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
-
-// The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
-// because a single patchable "set" instruction (NativeMovConstReg,
-// or NativeMovConstPatching for compiler1)
-// serves to set up either quantity, depending on whether the compiled
-// call site is an inline cache or is megamorphic. See the function
-// CompiledIC::set_to_megamorphic.
-//
-// If an inline cache targets an interpreted method, then the
-// G5 register will be used twice during the call. First,
-// the call site will be patched to load a compiledICHolder
-// into G5. (This is an ordered pair of ic_klass, method.)
-// The c2i adapter will first check the ic_klass, then load
-// G5_method with the method part of the pair just before
-// jumping into the interpreter.
-//
-// Note that G5_method is only the method-self for the interpreter,
-// and is logically unrelated to G5_megamorphic_method.
-//
-// Invariants on G2_thread (the JavaThread pointer):
-// - it should not be used for any other purpose anywhere
-// - it must be re-initialized by StubRoutines::call_stub()
-// - it must be preserved around every use of call_VM
-
-// We can consider using g2/g3/g4 to cache more values than the
-// JavaThread, such as the card-marking base or perhaps pointers into
-// Eden. It's something of a waste to use them as scratch temporaries,
-// since they are not supposed to be volatile. (Of course, if we find
-// that Java doesn't benefit from application globals, then we can just
-// use them as ordinary temporaries.)
-//
-// Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
-// it makes sense to use them routinely for procedure linkage,
-// whenever the On registers are not applicable. Examples: G5_method,
-// G5_inline_cache_klass, and a double handful of miscellaneous compiler
-// stubs. This means that compiler stubs, etc., should be kept to a
-// maximum of two or three G-register arguments.
-
-
-// stub frames
-
-REGISTER_DECLARATION(Register, Lentry_args , L0); // pointer to args passed to callee (interpreter) not stub itself
-
-// Interpreter frames
-
-#ifdef CC_INTERP
-REGISTER_DECLARATION(Register, Lstate , L0); // interpreter state object pointer
-REGISTER_DECLARATION(Register, L1_scratch , L1); // scratch
-REGISTER_DECLARATION(Register, Lmirror , L1); // mirror (for native methods only)
-REGISTER_DECLARATION(Register, L2_scratch , L2);
-REGISTER_DECLARATION(Register, L3_scratch , L3);
-REGISTER_DECLARATION(Register, L4_scratch , L4);
-REGISTER_DECLARATION(Register, Lscratch , L5); // C1 uses
-REGISTER_DECLARATION(Register, Lscratch2 , L6); // C1 uses
-REGISTER_DECLARATION(Register, L7_scratch , L7); // constant pool cache
-REGISTER_DECLARATION(Register, O5_savedSP , O5);
-REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
-                                                     // a copy of SP, so in 64-bit it's a biased value.  The bias
- // is added and removed as needed in the frame code.
-// Interface to signature handler
-REGISTER_DECLARATION(Register, Llocals , L7); // pointer to locals for signature handler
-REGISTER_DECLARATION(Register, Lmethod , L6); // Method* when calling signature handler
-
-#else
-REGISTER_DECLARATION(Register, Lesp , L0); // expression stack pointer
-REGISTER_DECLARATION(Register, Lbcp , L1); // pointer to next bytecode
-REGISTER_DECLARATION(Register, Lmethod , L2);
-REGISTER_DECLARATION(Register, Llocals , L3);
-REGISTER_DECLARATION(Register, Largs , L3); // pointer to locals for signature handler
- // must match Llocals in asm interpreter
-REGISTER_DECLARATION(Register, Lmonitors , L4);
-REGISTER_DECLARATION(Register, Lbyte_code , L5);
-// When calling out from the interpreter we record SP so that we can remove any extra stack
-// space allocated during adapter transitions. This register is only live from the point
-// of the call until we return.
-REGISTER_DECLARATION(Register, Llast_SP , L5);
-REGISTER_DECLARATION(Register, Lscratch , L5);
-REGISTER_DECLARATION(Register, Lscratch2 , L6);
-REGISTER_DECLARATION(Register, LcpoolCache , L6); // constant pool cache
-
-REGISTER_DECLARATION(Register, O5_savedSP , O5);
-REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
-                                                     // a copy of SP, so in 64-bit it's a biased value.  The bias
- // is added and removed as needed in the frame code.
-REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
-REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
-REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
-#endif /* CC_INTERP */
-
-// NOTE: Lscratch2 and LcpoolCache point to the same registers in
-// the interpreter code. If Lscratch2 needs to be used for some
-// purpose, then LcpoolCache should be restored after that for
-// the interpreter to work right.
-// (These assignments must be compatible with L7_thread_cache; see above.)
-
-// Since Lbcp points into the middle of the method object,
-// it is temporarily converted into a "bcx" during GC.
-
-// Exception processing
-// These registers are passed into exception handlers.
-// All exception handlers require the exception object being thrown.
-// In addition, an nmethod's exception handler must be passed
-// the address of the call site within the nmethod, to allow
-// proper selection of the applicable catch block.
-// (Interpreter frames use their own bcp() for this purpose.)
-//
-// The Oissuing_pc value is not always needed. When jumping to a
-// handler that is known to be interpreted, the Oissuing_pc value can be
-// omitted. An actual catch block in compiled code receives (from its
-// nmethod's exception handler) the thrown exception in the Oexception,
-// but it doesn't need the Oissuing_pc.
-//
-// If an exception handler (either interpreted or compiled)
-// discovers there is no applicable catch block, it updates
-// the Oissuing_pc to the continuation PC of its own caller,
-// pops back to that caller's stack frame, and executes that
-// caller's exception handler. Obviously, this process will
-// iterate until the control stack is popped back to a method
-// containing an applicable catch block. A key invariant is
-// that the Oissuing_pc value is always a value local to
-// the method whose exception handler is currently executing.
-//
-// Note: The issuing PC value is __not__ a raw return address (I7 value).
-// It is a "return pc", the address __following__ the call.
-// Raw return addresses are converted to issuing PCs by frame::pc(),
-// or by stubs. Issuing PCs can be used directly with PC range tables.
-//
-REGISTER_DECLARATION(Register, Oexception , O0); // exception being thrown
-REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
-
-
-// These must occur after the declarations above
-#ifndef DONT_USE_REGISTER_DEFINES
-
-#define Gthread AS_REGISTER(Register, Gthread)
-#define Gmethod AS_REGISTER(Register, Gmethod)
-#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
-#define Ginline_cache_reg AS_REGISTER(Register, Ginline_cache_reg)
-#define Gargs AS_REGISTER(Register, Gargs)
-#define Lthread_cache AS_REGISTER(Register, Lthread_cache)
-#define Gframe_size AS_REGISTER(Register, Gframe_size)
-#define Gtemp AS_REGISTER(Register, Gtemp)
-
-#ifdef CC_INTERP
-#define Lstate AS_REGISTER(Register, Lstate)
-#define Lesp AS_REGISTER(Register, Lesp)
-#define L1_scratch AS_REGISTER(Register, L1_scratch)
-#define Lmirror AS_REGISTER(Register, Lmirror)
-#define L2_scratch AS_REGISTER(Register, L2_scratch)
-#define L3_scratch AS_REGISTER(Register, L3_scratch)
-#define L4_scratch AS_REGISTER(Register, L4_scratch)
-#define Lscratch AS_REGISTER(Register, Lscratch)
-#define Lscratch2 AS_REGISTER(Register, Lscratch2)
-#define L7_scratch AS_REGISTER(Register, L7_scratch)
-#define Ostate AS_REGISTER(Register, Ostate)
-#else
-#define Lesp AS_REGISTER(Register, Lesp)
-#define Lbcp AS_REGISTER(Register, Lbcp)
-#define Lmethod AS_REGISTER(Register, Lmethod)
-#define Llocals AS_REGISTER(Register, Llocals)
-#define Lmonitors AS_REGISTER(Register, Lmonitors)
-#define Lbyte_code AS_REGISTER(Register, Lbyte_code)
-#define Lscratch AS_REGISTER(Register, Lscratch)
-#define Lscratch2 AS_REGISTER(Register, Lscratch2)
-#define LcpoolCache AS_REGISTER(Register, LcpoolCache)
-#endif /* ! CC_INTERP */
-
-#define Lentry_args AS_REGISTER(Register, Lentry_args)
-#define I5_savedSP AS_REGISTER(Register, I5_savedSP)
-#define O5_savedSP AS_REGISTER(Register, O5_savedSP)
-#define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
-#define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
-#define IdispatchTables AS_REGISTER(Register, IdispatchTables)
-
-#define Oexception AS_REGISTER(Register, Oexception)
-#define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
-
-
-#endif
-
-// Address is an abstraction used to represent a memory location.
-//
-// Note: A register location is represented via a Register, not
-// via an address for efficiency & simplicity reasons.
-
-class Address VALUE_OBJ_CLASS_SPEC {
- private:
- Register _base; // Base register.
- RegisterOrConstant _index_or_disp; // Index register or constant displacement.
- RelocationHolder _rspec;
-
- public:
- Address() : _base(noreg), _index_or_disp(noreg) {}
-
- Address(Register base, RegisterOrConstant index_or_disp)
- : _base(base),
- _index_or_disp(index_or_disp) {
- }
-
- Address(Register base, Register index)
- : _base(base),
- _index_or_disp(index) {
- }
-
- Address(Register base, int disp)
- : _base(base),
- _index_or_disp(disp) {
- }
-
-#ifdef ASSERT
- // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
- Address(Register base, ByteSize disp)
- : _base(base),
- _index_or_disp(in_bytes(disp)) {
- }
-#endif
-
- // accessors
- Register base() const { return _base; }
- Register index() const { return _index_or_disp.as_register(); }
- int disp() const { return _index_or_disp.as_constant(); }
-
- bool has_index() const { return _index_or_disp.is_register(); }
- bool has_disp() const { return _index_or_disp.is_constant(); }
-
- bool uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
-
- const relocInfo::relocType rtype() { return _rspec.type(); }
- const RelocationHolder& rspec() { return _rspec; }
-
- RelocationHolder rspec(int offset) const {
- return offset == 0 ? _rspec : _rspec.plus(offset);
- }
-
- inline bool is_simm13(int offset = 0); // check disp+offset for overflow
-
- Address plus_disp(int plusdisp) const { // bump disp by a small amount
- assert(_index_or_disp.is_constant(), "must have a displacement");
- Address a(base(), disp() + plusdisp);
- return a;
- }
- bool is_same_address(Address a) const {
- // disregard _rspec
- return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
- }
-
- Address after_save() const {
- Address a = (*this);
- a._base = a._base->after_save();
- return a;
- }
-
- Address after_restore() const {
- Address a = (*this);
- a._base = a._base->after_restore();
- return a;
- }
-
- // Convert the raw encoding form into the form expected by the
- // constructor for Address.
- static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
-
- friend class Assembler;
-};
-
-
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
- private:
- address _address;
- RelocationHolder _rspec;
-
- RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
- switch (rtype) {
- case relocInfo::external_word_type:
- return external_word_Relocation::spec(addr);
- case relocInfo::internal_word_type:
- return internal_word_Relocation::spec(addr);
-#ifdef _LP64
- case relocInfo::opt_virtual_call_type:
- return opt_virtual_call_Relocation::spec();
- case relocInfo::static_call_type:
- return static_call_Relocation::spec();
- case relocInfo::runtime_call_type:
- return runtime_call_Relocation::spec();
-#endif
- case relocInfo::none:
- return RelocationHolder();
- default:
- ShouldNotReachHere();
- return RelocationHolder();
- }
- }
-
- protected:
- // creation
- AddressLiteral() : _address(NULL), _rspec(NULL) {}
-
- public:
- AddressLiteral(address addr, RelocationHolder const& rspec)
- : _address(addr),
- _rspec(rspec) {}
-
- // Some constructors to avoid casting at the call site.
- AddressLiteral(jobject obj, RelocationHolder const& rspec)
- : _address((address) obj),
- _rspec(rspec) {}
-
- AddressLiteral(intptr_t value, RelocationHolder const& rspec)
- : _address((address) value),
- _rspec(rspec) {}
-
- AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- // Some constructors to avoid casting at the call site.
- AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
-#ifdef _LP64
- // 32-bit complains about a multiple declaration for int*.
- AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-#endif
-
- AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(Metadata** addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
- : _address((address) addr),
- _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
- intptr_t value() const { return (intptr_t) _address; }
- int low10() const;
-
- const relocInfo::relocType rtype() const { return _rspec.type(); }
- const RelocationHolder& rspec() const { return _rspec; }
-
- RelocationHolder rspec(int offset) const {
- return offset == 0 ? _rspec : _rspec.plus(offset);
- }
-};
-
-// Convenience classes
-class ExternalAddress: public AddressLiteral {
- private:
- static relocInfo::relocType reloc_for_target(address target) {
- // Sometimes ExternalAddress is used for values which aren't
- // exactly addresses, like the card table base.
- // external_word_type can't be used for values in the first page
- // so just skip the reloc in that case.
- return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
- }
-
- public:
- ExternalAddress(address target) : AddressLiteral(target, reloc_for_target( target)) {}
- ExternalAddress(Metadata** target) : AddressLiteral(target, reloc_for_target((address) target)) {}
-};
-
-inline Address RegisterImpl::address_in_saved_window() const {
- return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
-}
-
-
-
-// Argument is an abstraction used to represent an outgoing
-// actual argument or an incoming formal parameter, whether
-// it resides in memory or in a register, in a manner consistent
-// with the SPARC Application Binary Interface, or ABI. This is
-// often referred to as the native or C calling convention.
-
-class Argument VALUE_OBJ_CLASS_SPEC {
- private:
- int _number;
- bool _is_in;
-
- public:
-#ifdef _LP64
- enum {
- n_register_parameters = 6, // only 6 registers may contain integer parameters
- n_float_register_parameters = 16 // Can have up to 16 floating registers
- };
-#else
- enum {
- n_register_parameters = 6 // only 6 registers may contain integer parameters
- };
-#endif
-
- // creation
- Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
-
- int number() const { return _number; }
- bool is_in() const { return _is_in; }
- bool is_out() const { return !is_in(); }
-
- Argument successor() const { return Argument(number() + 1, is_in()); }
- Argument as_in() const { return Argument(number(), true ); }
- Argument as_out() const { return Argument(number(), false); }
-
- // locating register-based arguments:
- bool is_register() const { return _number < n_register_parameters; }
-
-#ifdef _LP64
- // locating Floating Point register-based arguments:
- bool is_float_register() const { return _number < n_float_register_parameters; }
-
- FloatRegister as_float_register() const {
- assert(is_float_register(), "must be a register argument");
- return as_FloatRegister(( number() *2 ) + 1);
- }
- FloatRegister as_double_register() const {
- assert(is_float_register(), "must be a register argument");
- return as_FloatRegister(( number() *2 ));
- }
-#endif
-
- Register as_register() const {
- assert(is_register(), "must be a register argument");
- return is_in() ? as_iRegister(number()) : as_oRegister(number());
- }
-
- // locating memory-based arguments
- Address as_address() const {
- assert(!is_register(), "must be a memory argument");
- return address_in_frame();
- }
-
- // When applied to a register-based argument, give the corresponding address
- // into the 6-word area "into which callee may store register arguments"
- // (This is a different place than the corresponding register-save area location.)
- Address address_in_frame() const;
-
- // debugging
- const char* name() const;
-
- friend class Assembler;
-};
-
+#include "asm/register.hpp"
// The SPARC Assembler: Pure assembler doing NO optimizations on the instruction
// level; i.e., what you write
// is what you get. The Assembler is generating code into a CodeBuffer.
class Assembler : public AbstractAssembler {
- protected:
-
- static void print_instruction(int inst);
- static int patched_branch(int dest_pos, int inst, int inst_pos);
- static int branch_destination(int inst, int pos);
-
-
friend class AbstractAssembler;
friend class AddressLiteral;
@@ -1230,10 +689,7 @@
// pp 135 (addc was addx in v8)
inline void add(Register s1, Register s2, Register d );
- inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
- inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
- inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
- inline void add(const Address& a, Register d, int offset = 0);
+ inline void add(Register s1, int simm13a, Register d );
void addcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void addcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
@@ -1395,12 +851,9 @@
// 171
- inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
- inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
-
inline void ldfsr( Register s1, Register s2 );
inline void ldfsr( Register s1, int simm13a);
@@ -1438,36 +891,9 @@
inline void lduw( Register s1, int simm13a, Register d);
inline void ldx( Register s1, Register s2, Register d );
inline void ldx( Register s1, int simm13a, Register d);
- inline void ld( Register s1, Register s2, Register d );
- inline void ld( Register s1, int simm13a, Register d);
inline void ldd( Register s1, Register s2, Register d );
inline void ldd( Register s1, int simm13a, Register d);
-#ifdef ASSERT
- // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
- inline void ld( Register s1, ByteSize simm13a, Register d);
-#endif
-
- inline void ldsb(const Address& a, Register d, int offset = 0);
- inline void ldsh(const Address& a, Register d, int offset = 0);
- inline void ldsw(const Address& a, Register d, int offset = 0);
- inline void ldub(const Address& a, Register d, int offset = 0);
- inline void lduh(const Address& a, Register d, int offset = 0);
- inline void lduw(const Address& a, Register d, int offset = 0);
- inline void ldx( const Address& a, Register d, int offset = 0);
- inline void ld( const Address& a, Register d, int offset = 0);
- inline void ldd( const Address& a, Register d, int offset = 0);
-
- inline void ldub( Register s1, RegisterOrConstant s2, Register d );
- inline void ldsb( Register s1, RegisterOrConstant s2, Register d );
- inline void lduh( Register s1, RegisterOrConstant s2, Register d );
- inline void ldsh( Register s1, RegisterOrConstant s2, Register d );
- inline void lduw( Register s1, RegisterOrConstant s2, Register d );
- inline void ldsw( Register s1, RegisterOrConstant s2, Register d );
- inline void ldx( Register s1, RegisterOrConstant s2, Register d );
- inline void ld( Register s1, RegisterOrConstant s2, Register d );
- inline void ldd( Register s1, RegisterOrConstant s2, Register d );
-
// pp 177
void ldsba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -1505,7 +931,6 @@
void andcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void andn( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | rs2(s2) ); }
void andn( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
- void andn( Register s1, RegisterOrConstant s2, Register d);
void andncc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void andncc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void or3( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1584,13 +1009,12 @@
// pp 203
- void prefetch( Register s1, Register s2, PrefetchFcn f);
- void prefetch( Register s1, int simm13a, PrefetchFcn f);
+ void prefetch( Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
+ void prefetch( Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
+
void prefetcha( Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void prefetcha( Register s1, int simm13a, PrefetchFcn f ) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
- inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
-
// pp 208
// not implementing read privileged register
@@ -1653,10 +1077,8 @@
// pp 222
- inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
inline void stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
- inline void stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
inline void stfsr( Register s1, Register s2 );
inline void stfsr( Register s1, int simm13a);
@@ -1676,32 +1098,11 @@
inline void sth( Register d, Register s1, int simm13a);
inline void stw( Register d, Register s1, Register s2 );
inline void stw( Register d, Register s1, int simm13a);
- inline void st( Register d, Register s1, Register s2 );
- inline void st( Register d, Register s1, int simm13a);
inline void stx( Register d, Register s1, Register s2 );
inline void stx( Register d, Register s1, int simm13a);
inline void std( Register d, Register s1, Register s2 );
inline void std( Register d, Register s1, int simm13a);
-#ifdef ASSERT
- // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
- inline void st( Register d, Register s1, ByteSize simm13a);
-#endif
-
- inline void stb( Register d, const Address& a, int offset = 0 );
- inline void sth( Register d, const Address& a, int offset = 0 );
- inline void stw( Register d, const Address& a, int offset = 0 );
- inline void stx( Register d, const Address& a, int offset = 0 );
- inline void st( Register d, const Address& a, int offset = 0 );
- inline void std( Register d, const Address& a, int offset = 0 );
-
- inline void stb( Register d, Register s1, RegisterOrConstant s2 );
- inline void sth( Register d, Register s1, RegisterOrConstant s2 );
- inline void stw( Register d, Register s1, RegisterOrConstant s2 );
- inline void stx( Register d, Register s1, RegisterOrConstant s2 );
- inline void std( Register d, Register s1, RegisterOrConstant s2 );
- inline void st( Register d, Register s1, RegisterOrConstant s2 );
-
// pp 177
void stba( Register d, Register s1, Register s2, int ia ) { emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -1731,9 +1132,6 @@
void sub( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
void sub( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
- // Note: offset is added to s2.
- inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
-
void subcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
void subcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void subc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1745,7 +1143,6 @@
inline void swap( Register s1, Register s2, Register d );
inline void swap( Register s1, int simm13a, Register d);
- inline void swap( Address& a, Register d, int offset = 0 );
// pp 232
@@ -1799,879 +1196,12 @@
void movwtos( Register s, FloatRegister d ) { vis3_only(); emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
void movxtod( Register s, FloatRegister d ) { vis3_only(); emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
-
-
-
- // For a given register condition, return the appropriate condition code
- // (the one you would use to get the same effect after "tst" on
- // the target register).
- Assembler::Condition reg_cond_to_cc_cond(RCondition in);
-
-
// Creation
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
#ifdef CHECK_DELAY
delay_state = no_delay;
#endif
}
-
- // Testing
-#ifndef PRODUCT
- void test_v9();
- void test_v8_onlys();
-#endif
-};
-
-
-class RegistersForDebugging : public StackObj {
- public:
- intptr_t i[8], l[8], o[8], g[8];
- float f[32];
- double d[32];
-
- void print(outputStream* s);
-
- static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
- static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
- static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
- static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
- static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
- static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
-
- // gen asm code to save regs
- static void save_registers(MacroAssembler* a);
-
- // restore global registers in case C code disturbed them
- static void restore_registers(MacroAssembler* a, Register r);
-
-
};
-
-// MacroAssembler extends Assembler by a few frequently used macros.
-//
-// Most of the standard SPARC synthetic ops are defined here.
-// Instructions for which a 'better' code sequence exists depending
-// on arguments should also go in here.
-
-#define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
-#define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
-#define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__)
-#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
-
-
-class MacroAssembler: public Assembler {
- protected:
- // Support for VM calls
- // This is the base routine called by the different versions of call_VM_leaf. The interpreter
- // may customize this version by overriding it for its purposes (e.g., to save/restore
- // additional registers when doing a VM call).
-#ifdef CC_INTERP
- #define VIRTUAL
-#else
- #define VIRTUAL virtual
-#endif
-
- VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
-
- //
- // It is imperative that all calls into the VM are handled via the call_VM macros.
- // They make sure that the stack linkage is set up correctly. call_VM's correspond
- // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
- //
- // This is the base routine called by the different versions of call_VM. The interpreter
- // may customize this version by overriding it for its purposes (e.g., to save/restore
- // additional registers when doing a VM call).
- //
- // A non-volatile java_thread_cache register should be specified so
- // that the G2_thread value can be preserved across the call.
- // (If java_thread_cache is noreg, then a slow get_thread call
- // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
- // thread.
- //
- // If no last_java_sp is specified (noreg), then SP will be used instead.
-
- virtual void call_VM_base(
- Register oop_result, // where an oop-result ends up if any; use noreg otherwise
- Register java_thread_cache, // the thread if computed before ; use noreg otherwise
- Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
- address entry_point, // the entry point
- int number_of_arguments, // the number of arguments (w/o thread) to pop after call
- bool check_exception=true // flag which indicates if exception should be checked
- );
-
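
A minimal sketch of how this machinery is normally reached through the public call_VM wrappers declared further down, rather than through call_VM_base directly; the entry point name, the register choices and the usual '__' shorthand for the MacroAssembler pointer are illustrative assumptions, not taken from this file:

  // 'some_runtime_entry' stands in for a JRT_ENTRY routine in the VM
  __ call_VM(O0,                                            // oop result (if any) ends up here
             CAST_FROM_FN_PTR(address, some_runtime_entry), // VM entry point
             O1);                                           // single Register argument
  // the thread pointer is prepended automatically and, because check_exceptions
  // defaults to true, pending exceptions are checked on return
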
- // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
- // The implementation is only non-empty for the InterpreterMacroAssembler,
- // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
- virtual void check_and_handle_popframe(Register scratch_reg);
- virtual void check_and_handle_earlyret(Register scratch_reg);
-
- public:
- MacroAssembler(CodeBuffer* code) : Assembler(code) {}
-
- // Support for NULL-checks
- //
- // Generates code that causes a NULL OS exception if the content of reg is NULL.
- // If the accessed location is M[reg + offset] and the offset is known, provide the
- // offset. No explicit code generation is needed if the offset is within a certain
- // range (0 <= offset <= page_size).
- //
- // %%%%%% Currently not done for SPARC
-
- void null_check(Register reg, int offset = -1);
- static bool needs_explicit_null_check(intptr_t offset);
-
- // support for delayed instructions
- MacroAssembler* delayed() { Assembler::delayed(); return this; }
-
- // branches that use right instruction for v8 vs. v9
- inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void br( Condition c, bool a, Predict p, Label& L );
-
- inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void fb( Condition c, bool a, Predict p, Label& L );
-
- // compares register with zero (32 bit) and branches (V9 and V8 instructions)
- void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn );
- // Compares a pointer register with zero and branches on (not)null.
- // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
- void br_null ( Register s1, bool a, Predict p, Label& L );
- void br_notnull( Register s1, bool a, Predict p, Label& L );
-
- //
- // Compare registers and branch with nop in delay slot or cbcond without delay slot.
- //
- // ATTENTION: use these instructions with caution because the cbcond instruction
- // has a very short branch distance: 512 instructions (2 KB).
-
- // Compare integer (32 bit) values (icc only).
- void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L);
- void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
- // Platform-dependent version for pointer compares (icc on !LP64 and xcc on LP64).
- void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L);
- void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
-
- // Short branch versions that compare a pointer with zero.
- void br_null_short ( Register s1, Predict p, Label& L );
- void br_notnull_short( Register s1, Predict p, Label& L );
-
- // unconditional short branch
- void ba_short(Label& L);
-
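
A hedged illustration of the range caveat above; the registers, the label and the '__' MacroAssembler shorthand are illustrative. A tight byte-copy loop is a typical fit for these short forms, since the whole loop body stays well inside the 2 KB cbcond window:

  Label L_loop;
  __ bind(L_loop);
  __ ldub(O0, 0, O3);      // load a byte from [O0]
  __ stb (O3, O1, 0);      // store it to [O1]
  __ inc (O0);             // advance source
  __ inc (O1);             // advance destination
  __ dec (O2);             // remaining count
  __ cmp_and_br_short(O2, 0, Assembler::notEqual, Assembler::pt, L_loop);
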
- inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
-
- // Branch that tests xcc in LP64 and icc in !LP64
- inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void brx( Condition c, bool a, Predict p, Label& L );
-
- // unconditional branch
- inline void ba( Label& L );
-
- // Branch that tests fp condition codes
- inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
- inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
-
- // get PC the best way
- inline int get_pc( Register d );
-
- // SPARC shorthands (pp 85, V8 manual; pp 289, V9 manual)
- inline void cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
- inline void cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
-
- inline void jmp( Register s1, Register s2 );
- inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
-
- // Check if the call target is out of wdisp30 range (relative to the code cache)
- static inline bool is_far_target(address d);
- inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
- inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
- inline void callr( Register s1, Register s2 );
- inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
-
- // Emits nothing on V8
- inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
- inline void iprefetch( Label& L);
-
- inline void tst( Register s ) { orcc( G0, s, G0 ); }
-
-#ifdef PRODUCT
- inline void ret( bool trace = TraceJumps ) { if (trace) {
- mov(I7, O7); // traceable register
- JMP(O7, 2 * BytesPerInstWord);
- } else {
- jmpl( I7, 2 * BytesPerInstWord, G0 );
- }
- }
-
- inline void retl( bool trace = TraceJumps ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
- else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
-#else
- void ret( bool trace = TraceJumps );
- void retl( bool trace = TraceJumps );
-#endif /* PRODUCT */
-
- // Required platform-specific helpers for Label::patch_instructions.
- // They _shadow_ the declarations in AbstractAssembler, which are undefined.
- void pd_patch_instruction(address branch, address target);
-#ifndef PRODUCT
- static void pd_print_patched_instruction(address branch);
-#endif
-
- // sethi Macro handles optimizations and relocations
-private:
- void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
-public:
- void sethi(const AddressLiteral& addrlit, Register d);
- void patchable_sethi(const AddressLiteral& addrlit, Register d);
-
- // compute the number of instructions for a sethi/set
- static int insts_for_sethi( address a, bool worst_case = false );
- static int worst_case_insts_for_set();
-
- // set may be either setsw or setuw (high 32 bits may be zero or sign)
-private:
- void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
- static int insts_for_internal_set(intptr_t value);
-public:
- void set(const AddressLiteral& addrlit, Register d);
- void set(intptr_t value, Register d);
- void set(address addr, Register d, RelocationHolder const& rspec);
- static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }
-
- void patchable_set(const AddressLiteral& addrlit, Register d);
- void patchable_set(intptr_t value, Register d);
- void set64(jlong value, Register d, Register tmp);
- static int insts_for_set64(jlong value);
-
- // sign-extend 32 to 64
- inline void signx( Register s, Register d ) { sra( s, G0, d); }
- inline void signx( Register d ) { sra( d, G0, d); }
-
- inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
- inline void not1( Register d ) { xnor( d, G0, d ); }
-
- inline void neg( Register s, Register d ) { sub( G0, s, d ); }
- inline void neg( Register d ) { sub( G0, d, d ); }
-
- inline void cas( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
- inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
- // Functions for isolating 64 bit atomic swaps for LP64
- // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
- inline void cas_ptr( Register s1, Register s2, Register d) {
-#ifdef _LP64
- casx( s1, s2, d );
-#else
- cas( s1, s2, d );
-#endif
- }
-
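
A hedged sketch of the usual retry idiom built on cas_ptr; the counter address register and the temporaries are illustrative. On SPARC the cas/casx destination register receives the old memory value, so success is detected by comparing it with the expected value:

  Label L_retry;
  __ bind(L_retry);
  __ ld_ptr(O0, 0, O3);    // O3 = current value at [O0]
  __ add(O3, 1, O4);       // O4 = desired new value
  __ cas_ptr(O0, O3, O4);  // if [O0] == O3, swap with O4; O4 always gets the old memory value
  __ cmp(O3, O4);          // equal => the swap happened
  __ brx(Assembler::notEqual, false, Assembler::pn, L_retry);
  __ delayed()->nop();     // branch delay slot
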
- // Functions for isolating 64 bit shifts for LP64
- inline void sll_ptr( Register s1, Register s2, Register d );
- inline void sll_ptr( Register s1, int imm6a, Register d );
- inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
- inline void srl_ptr( Register s1, Register s2, Register d );
- inline void srl_ptr( Register s1, int imm6a, Register d );
-
- // little-endian
- inline void casl( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
- inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
-
- inline void inc( Register d, int const13 = 1 ) { add( d, const13, d); }
- inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }
-
- inline void dec( Register d, int const13 = 1 ) { sub( d, const13, d); }
- inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }
-
- inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
- inline void btst( int simm13a, Register s ) { andcc( s, simm13a, G0 ); }
-
- inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
- inline void bset( int simm13a, Register s ) { or3( s, simm13a, s ); }
-
- inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
- inline void bclr( int simm13a, Register s ) { andn( s, simm13a, s ); }
-
- inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
- inline void btog( int simm13a, Register s ) { xor3( s, simm13a, s ); }
-
- inline void clr( Register d ) { or3( G0, G0, d ); }
-
- inline void clrb( Register s1, Register s2);
- inline void clrh( Register s1, Register s2);
- inline void clr( Register s1, Register s2);
- inline void clrx( Register s1, Register s2);
-
- inline void clrb( Register s1, int simm13a);
- inline void clrh( Register s1, int simm13a);
- inline void clr( Register s1, int simm13a);
- inline void clrx( Register s1, int simm13a);
-
- // copy & clear upper word
- inline void clruw( Register s, Register d ) { srl( s, G0, d); }
- // clear upper word
- inline void clruwu( Register d ) { srl( d, G0, d); }
-
- // membar pseudo instruction. Takes the target memory model into account.
- inline void membar( Assembler::Membar_mask_bits const7a );
-
- // returns whether membar generates anything.
- inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
-
- // mov pseudo instructions
- inline void mov( Register s, Register d) {
- if ( s != d ) or3( G0, s, d);
- else assert_not_delayed(); // Put something useful in the delay slot!
- }
-
- inline void mov_or_nop( Register s, Register d) {
- if ( s != d ) or3( G0, s, d);
- else nop();
- }
-
- inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
-
- // address pseudos: make these names unlike instruction names to avoid confusion
- inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
- inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
- inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
- inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
- inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
- inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
- inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
- inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
- inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
-
- // ring buffer traceable jumps
-
- void jmp2( Register r1, Register r2, const char* file, int line );
- void jmp ( Register r1, int offset, const char* file, int line );
-
- void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
- void jump (const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
-
-
- // argument pseudos:
-
- inline void load_argument( Argument& a, Register d );
- inline void store_argument( Register s, Argument& a );
- inline void store_ptr_argument( Register s, Argument& a );
- inline void store_float_argument( FloatRegister s, Argument& a );
- inline void store_double_argument( FloatRegister s, Argument& a );
- inline void store_long_argument( Register s, Argument& a );
-
- // handy macros:
-
- inline void round_to( Register r, int modulus ) {
- assert_not_delayed();
- inc( r, modulus - 1 );
- and3( r, -modulus, r );
- }
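
For example (values illustrative), rounding a register holding 13 up to a multiple of 8:

  //   inc(r, 7)    ->  r = 20
  //   and3(r, -8)  ->  r = 16   (the smallest multiple of 8 that is >= 13)
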
-
- // --------------------------------------------------
-
- // Functions for isolating 64 bit loads for LP64
- // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
- // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
- inline void ld_ptr(Register s1, Register s2, Register d);
- inline void ld_ptr(Register s1, int simm13a, Register d);
- inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
- inline void ld_ptr(const Address& a, Register d, int offset = 0);
- inline void st_ptr(Register d, Register s1, Register s2);
- inline void st_ptr(Register d, Register s1, int simm13a);
- inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
- inline void st_ptr(Register d, const Address& a, int offset = 0);
-
-#ifdef ASSERT
- // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
- inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
- inline void st_ptr(Register d, Register s1, ByteSize simm13a);
-#endif
-
- // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
- // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
- inline void ld_long(Register s1, Register s2, Register d);
- inline void ld_long(Register s1, int simm13a, Register d);
- inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
- inline void ld_long(const Address& a, Register d, int offset = 0);
- inline void st_long(Register d, Register s1, Register s2);
- inline void st_long(Register d, Register s1, int simm13a);
- inline void st_long(Register d, Register s1, RegisterOrConstant s2);
- inline void st_long(Register d, const Address& a, int offset = 0);
-
- // Helpers for address formation.
- // - They emit only a move if s2 is a constant zero.
- // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
- // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
- RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
- RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
- RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
-
- RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
- if (is_simm13(src.constant_or_zero()))
- return src; // register or short constant
- guarantee(temp != noreg, "constant offset overflow");
- set(src.as_constant(), temp);
- return temp;
- }
-
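
A hedged sketch of a typical caller; the base/destination registers and the byte offset are illustrative. An offset that may not fit in 13 signed bits is funneled through ensure_simm13_or_reg first, and the result is used directly as a RegisterOrConstant operand:

  RegisterOrConstant roc_off = RegisterOrConstant(field_offset_in_bytes);
  roc_off = ensure_simm13_or_reg(roc_off, G3_scratch); // materialized into G3_scratch only if too big
  ld_ptr(Rbase, roc_off, Rdest);                       // ld_ptr accepts RegisterOrConstant offsets
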
- // --------------------------------------------------
-
- public:
- // traps as per trap.h (SPARC ABI?)
-
- void breakpoint_trap();
- void breakpoint_trap(Condition c, CC cc);
- void flush_windows_trap();
- void clean_windows_trap();
- void get_psr_trap();
- void set_psr_trap();
-
- // V8/V9 flush_windows
- void flush_windows();
-
- // Support for serializing memory accesses between threads
- void serialize_memory(Register thread, Register tmp1, Register tmp2);
-
- // Stack frame creation/removal
- void enter();
- void leave();
-
- // V8/V9 integer multiply
- void mult(Register s1, Register s2, Register d);
- void mult(Register s1, int simm13a, Register d);
-
- // V8/V9 read and write of condition codes.
- void read_ccr(Register d);
- void write_ccr(Register s);
-
- // Manipulation of C++ bools
- // These are idioms to flag the need for care when accessing bools; on
- // this platform we assume they are byte-sized.
-
- inline void stbool(Register d, const Address& a) { stb(d, a); }
- inline void ldbool(const Address& a, Register d) { ldub(a, d); }
- inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
-
- // klass oop manipulations if compressed
- void load_klass(Register src_oop, Register klass);
- void store_klass(Register klass, Register dst_oop);
- void store_klass_gap(Register s, Register dst_oop);
-
- // oop manipulations
- void load_heap_oop(const Address& s, Register d);
- void load_heap_oop(Register s1, Register s2, Register d);
- void load_heap_oop(Register s1, int simm13a, Register d);
- void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
- void store_heap_oop(Register d, Register s1, Register s2);
- void store_heap_oop(Register d, Register s1, int simm13a);
- void store_heap_oop(Register d, const Address& a, int offset = 0);
-
- void encode_heap_oop(Register src, Register dst);
- void encode_heap_oop(Register r) {
- encode_heap_oop(r, r);
- }
- void decode_heap_oop(Register src, Register dst);
- void decode_heap_oop(Register r) {
- decode_heap_oop(r, r);
- }
- void encode_heap_oop_not_null(Register r);
- void decode_heap_oop_not_null(Register r);
- void encode_heap_oop_not_null(Register src, Register dst);
- void decode_heap_oop_not_null(Register src, Register dst);
-
- void encode_klass_not_null(Register r);
- void decode_klass_not_null(Register r);
- void encode_klass_not_null(Register src, Register dst);
- void decode_klass_not_null(Register src, Register dst);
-
- // Support for managing the JavaThread pointer (i.e., the reference to
- // thread-local information).
- void get_thread(); // load G2_thread
- void verify_thread(); // verify G2_thread contents
- void save_thread (const Register thread_cache); // save to cache
- void restore_thread(const Register thread_cache); // restore from cache
-
- // Support for last Java frame (but use call_VM instead where possible)
- void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
- void reset_last_Java_frame(void);
-
- // Call into the VM.
- // Passes the thread pointer (in O0) as a prepended argument.
- // Makes sure oop return values are visible to the GC.
- void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
- void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
- void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
- void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
-
- // these overloadings are not presently used on SPARC:
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
-
- void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
- void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
- void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
- void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
-
- void get_vm_result (Register oop_result);
- void get_vm_result_2(Register metadata_result);
-
- // the vm result is currently getting hijacked for oop preservation
- void set_vm_result(Register oop_result);
-
- // Emit the CompiledIC call idiom
- void ic_call(address entry, bool emit_delay = true);
-
- // if call_VM_base was called with check_exceptions=false, then call
- // check_and_forward_exception to handle exceptions when it is safe
- void check_and_forward_exception(Register scratch_reg);
-
- private:
- // For V8
- void read_ccr_trap(Register ccr_save);
- void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
-
-#ifdef ASSERT
- // For V8 debugging. Uses a V8 instruction sequence and checks the
- // result with the V9 instructions rdccr and wrccr.
- // Uses Gscatch and Gscatch2
- void read_ccr_v8_assert(Register ccr_save);
- void write_ccr_v8_assert(Register ccr_save);
-#endif // ASSERT
-
- public:
-
- // Write the card table entry for obj - the tmp register is destroyed afterwards.
- void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
-
- void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
-
-#ifndef SERIALGC
- // General G1 pre-barrier generator.
- void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
-
- // General G1 post-barrier generator
- void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
-#endif // SERIALGC
-
- // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
- void push_fTOS();
-
- // pops double TOS element from CPU stack and pushes on FPU stack
- void pop_fTOS();
-
- void empty_FPU_stack();
-
- void push_IU_state();
- void pop_IU_state();
-
- void push_FPU_state();
- void pop_FPU_state();
-
- void push_CPU_state();
- void pop_CPU_state();
-
- // if heap base register is used - reinit it with the correct value
- void reinit_heapbase();
-
- // Debugging
- void _verify_oop(Register reg, const char * msg, const char * file, int line);
- void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
-
- // TODO: verify_method and klass metadata (compare against vptr?)
- void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
- void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
-
-#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
-#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
-#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
-#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
-
- // the verify_* macros above are effective only if +VerifyOops
- // verify_FPU is effective only if +VerifyFPU
- void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
- void stop(const char* msg); // prints msg, dumps registers and stops execution
- void warn(const char* msg); // prints msg, but don't stop
- void untested(const char* what = "");
- void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
- void should_not_reach_here() { stop("should not reach here"); }
- void print_CPU_state();
-
- // oops in code
- AddressLiteral allocate_oop_address(jobject obj); // allocate_index
- AddressLiteral constant_oop_address(jobject obj); // find_index
- inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
- inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
- inline void set_oop (const AddressLiteral& obj_addr, Register d); // same as load_address
-
- // metadata in code that we have to keep track of
- AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
- AddressLiteral constant_metadata_address(Metadata* obj); // find_index
- inline void set_metadata (Metadata* obj, Register d); // uses allocate_metadata_address
- inline void set_metadata_constant (Metadata* obj, Register d); // uses constant_metadata_address
- inline void set_metadata (const AddressLiteral& obj_addr, Register d); // same as load_address
-
- void set_narrow_oop( jobject obj, Register d );
- void set_narrow_klass( Klass* k, Register d );
-
- // nop padding
- void align(int modulus);
-
- // declare a safepoint
- void safepoint();
-
- // factor out part of stop into subroutine to save space
- void stop_subroutine();
- // factor out part of verify_oop into subroutine to save space
- void verify_oop_subroutine();
-
- // side-door communication with signalHandler in os_solaris.cpp
- static address _verify_oop_implicit_branch[3];
-
-#ifndef PRODUCT
- static void test();
-#endif
-
- int total_frame_size_in_bytes(int extraWords);
-
- // used when extraWords known statically
- void save_frame(int extraWords = 0);
- void save_frame_c1(int size_in_bytes);
- // make a frame, and simultaneously pass up one or two register values
- // into the new register window
- void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
-
- // given the number of (outgoing) params, calc the number of words needed on the frame
- void calc_mem_param_words(Register Rparam_words, Register Rresult);
-
- // used to calculate frame size dynamically
- // result is in bytes and must be negated for save inst
- void calc_frame_size(Register extraWords, Register resultReg);
-
- // calc and also save
- void calc_frame_size_and_save(Register extraWords, Register resultReg);
-
- static void debug(char* msg, RegistersForDebugging* outWindow);
-
- // implementations of bytecodes used by both interpreter and compiler
-
- void lcmp( Register Ra_hi, Register Ra_low,
- Register Rb_hi, Register Rb_low,
- Register Rresult);
-
- void lneg( Register Rhi, Register Rlow );
-
- void lshl( Register Rin_high, Register Rin_low, Register Rcount,
- Register Rout_high, Register Rout_low, Register Rtemp );
-
- void lshr( Register Rin_high, Register Rin_low, Register Rcount,
- Register Rout_high, Register Rout_low, Register Rtemp );
-
- void lushr( Register Rin_high, Register Rin_low, Register Rcount,
- Register Rout_high, Register Rout_low, Register Rtemp );
-
-#ifdef _LP64
- void lcmp( Register Ra, Register Rb, Register Rresult);
-#endif
-
- // Load and store values by size and signed-ness
- void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
- void store_sized_value(Register src, Address dst, size_t size_in_bytes);
-
- void float_cmp( bool is_float, int unordered_result,
- FloatRegister Fa, FloatRegister Fb,
- Register Rresult);
-
- void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
- void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
- void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
- void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-
- void save_all_globals_into_locals();
- void restore_globals_from_locals();
-
- void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
- address lock_addr=0, bool use_call_vm=false);
- void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
- address lock_addr=0, bool use_call_vm=false);
- void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
-
- // These set the icc condition code to equal if the lock succeeded
- // and notEqual if it failed and requires a slow case
- void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
- Register Rscratch,
- BiasedLockingCounters* counters = NULL,
- bool try_bias = UseBiasedLocking);
- void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
- Register Rscratch,
- bool try_bias = UseBiasedLocking);
-
- // Biased locking support
- // Upon entry, lock_reg must point to the lock record on the stack,
- // obj_reg must contain the target object, and mark_reg must contain
- // the target object's header.
- // Destroys mark_reg if an attempt is made to bias an anonymously
- // biased lock. In this case a failure will go either to the slow
- // case or fall through with the notEqual condition code set with
- // the expectation that the slow case in the runtime will be called.
- // In the fall-through case where the CAS-based lock is done,
- // mark_reg is not destroyed.
- void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
- Label& done, Label* slow_case = NULL,
- BiasedLockingCounters* counters = NULL);
- // Upon entry, the base register of mark_addr must contain the oop.
- // Destroys temp_reg.
-
- // If allow_delay_slot_filling is set to true, the next instruction
- // emitted after this one will go in an annulled delay slot if the
- // biased locking exit case failed.
- void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
-
- // allocation
- void eden_allocate(
- Register obj, // result: pointer to object after successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at compile time
- Register t1, // temp register
- Register t2, // temp register
- Label& slow_case // continuation point if fast allocation fails
- );
- void tlab_allocate(
- Register obj, // result: pointer to object after successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at compile time
- Register t1, // temp register
- Label& slow_case // continuation point if fast allocation fails
- );
- void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
- void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
- Register t1, Register t2);
-
- // interface method calling
- void lookup_interface_method(Register recv_klass,
- Register intf_klass,
- RegisterOrConstant itable_index,
- Register method_result,
- Register temp_reg, Register temp2_reg,
- Label& no_such_interface);
-
- // virtual method calling
- void lookup_virtual_method(Register recv_klass,
- RegisterOrConstant vtable_index,
- Register method_result);
-
- // Test sub_klass against super_klass, with fast and slow paths.
-
- // The fast path produces a tri-state answer: yes / no / maybe-slow.
- // One of the three labels can be NULL, meaning take the fall-through.
- // If super_check_offset is -1, the value is loaded up from super_klass.
- // No registers are killed, except temp_reg and temp2_reg.
- // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
- void check_klass_subtype_fast_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Label* L_success,
- Label* L_failure,
- Label* L_slow_path,
- RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
-
- // The rest of the type check; must be wired to a corresponding fast path.
- // It does not repeat the fast path logic, so don't use it standalone.
- // The temp_reg can be noreg, if no temps are available.
- // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
- // Updates the sub's secondary super cache as necessary.
- void check_klass_subtype_slow_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Register temp3_reg,
- Register temp4_reg,
- Label* L_success,
- Label* L_failure);
-
- // Simplified, combined version, good for typical uses.
- // Falls through on failure.
- void check_klass_subtype(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Label& L_success);
-
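
A hedged sketch of using the combined form above; the register and label names are illustrative. On failure it falls through, so failure handling follows the call and the success label is bound afterwards:

  Label L_is_subtype;
  check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, L_is_subtype);
  // falls through here when Rsub_klass is not a subtype of Rsuper_klass
  // ... failure path, e.g. jump to a slow case or throw ...
  bind(L_is_subtype);
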
- // method handles (JSR 292)
- // offset relative to Gargs of argument at tos[arg_slot].
- // (arg_slot == 0 means the last argument, not the first).
- RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
- Register temp_reg,
- int extra_slot_offset = 0);
- // Address of Gargs and argument_offset.
- Address argument_address(RegisterOrConstant arg_slot,
- Register temp_reg = noreg,
- int extra_slot_offset = 0);
-
- // Stack overflow checking
-
- // Note: this clobbers G3_scratch
- void bang_stack_with_offset(int offset) {
- // stack grows down, caller passes positive offset
- assert(offset > 0, "must bang with negative offset");
- set((-offset)+STACK_BIAS, G3_scratch);
- st(G0, SP, G3_scratch);
- }
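
As a hedged sketch of how a caller might use this (the loop shape is illustrative; StackShadowPages and os::vm_page_size() are the usual HotSpot globals assumed here), the shadow pages below SP can be banged one page at a time:

  for (int i = 1; i <= StackShadowPages; i++) {
    bang_stack_with_offset(i * os::vm_page_size());   // touches [SP + STACK_BIAS - i*page_size]
  }
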
-
- // Writes to successive stack pages until the given offset is reached, to check for
- // stack overflow + shadow pages. Clobbers the tsp and scratch registers.
- void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
-
- virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
-
- void verify_tlab();
-
- Condition negate_condition(Condition cond);
-
- // Helper functions for statistics gathering.
- // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
- void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
- // Unconditional increment.
- void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
- void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);
-
- // Compare char[] arrays aligned to 4 bytes.
- void char_arrays_equals(Register ary1, Register ary2,
- Register limit, Register result,
- Register chr1, Register chr2, Label& Ldone);
- // Use BIS for zeroing
- void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
-
-#undef VIRTUAL
-
-};
-
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and its
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual : public StackObj {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- // 'temp' is a temp register that this object can use (and trash)
- SkipIfEqual(MacroAssembler*, Register temp,
- const bool* flag_addr, Assembler::Condition condition);
- ~SkipIfEqual();
-};
-
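
A hedged usage sketch; 'SomeProbeFlag' is a made-up bool flag and G3_scratch is just one possible scratch register. The guarded code is emitted unconditionally but jumped over at run time whenever the flag compares equal under the given condition:

  {
    SkipIfEqual skip_if(masm, G3_scratch, &SomeProbeFlag, Assembler::zero);
    // ... instrumentation emitted here runs only when the flag is non-zero ...
  } // ~SkipIfEqual binds the skip-target label here
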
-#ifdef ASSERT
-// On RISC, there's no benefit to verifying instruction boundaries.
-inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
-#endif
-
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,33 +25,8 @@
#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
-#include "asm/assembler.inline.hpp"
-#include "asm/codeBuffer.hpp"
-#include "code/codeCache.hpp"
-#include "runtime/handles.inline.hpp"
-
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
- jint& stub_inst = *(jint*) branch;
- stub_inst = patched_branch(target - branch, stub_inst, 0);
-}
+#include "asm/assembler.hpp"
-#ifndef PRODUCT
-inline void MacroAssembler::pd_print_patched_instruction(address branch) {
- jint stub_inst = *(jint*) branch;
- print_instruction(stub_inst);
- ::tty->print("%s", " (unresolved)");
-}
-#endif // PRODUCT
-
-inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
-
-
-inline int AddressLiteral::low10() const {
- return Assembler::low10(value());
-}
-
-
-// inlines for SPARC assembler -- dmu 5/97
inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
@@ -76,9 +51,8 @@
}
-inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
-inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
+inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
+inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
@@ -111,16 +85,9 @@
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
-inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
- if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
- else ldf(w, s1, s2.as_constant(), d);
-}
-
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
-inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }
-
inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
@@ -152,98 +119,9 @@
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-#ifdef _LP64
-// Make all 32 bit loads signed so 64 bit registers maintain proper sign
-inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
-inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
-#else
-inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
-inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
-#endif
-
-#ifdef ASSERT
- // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-# ifdef _LP64
-inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
-# else
-inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
-# endif
-#endif
-
-inline void Assembler::ld( const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
- else { ld( a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::ldsb(const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
- else { ldsb(a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::ldsh(const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
- else { ldsh(a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::ldsw(const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
- else { ldsw(a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::ldub(const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
- else { ldub(a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::lduh(const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
- else { lduh(a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::lduw(const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
- else { lduw(a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::ldd( const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
- else { ldd( a.base(), a.disp() + offset, d); }
-}
-inline void Assembler::ldx( const Address& a, Register d, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
- else { ldx( a.base(), a.disp() + offset, d); }
-}
-
-inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
-inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
-inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
-inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
-inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
-inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
-inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
-inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
-inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
-
-// form effective addresses this way:
-inline void Assembler::add(const Address& a, Register d, int offset) {
- if (a.has_index()) add(a.base(), a.index(), d);
- else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
- if (offset != 0) add(d, offset, d);
-}
-inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
- if (s2.is_register()) add(s1, s2.as_register(), d);
- else { add(s1, s2.as_constant() + offset, d); offset = 0; }
- if (offset != 0) add(d, offset, d);
-}
-
-inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
- if (s2.is_register()) andn(s1, s2.as_register(), d);
- else andn(s1, s2.as_constant(), d);
-}
-
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
-inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }
-
-
inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
@@ -251,20 +129,9 @@
// pp 222
-inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
- if (s2.is_register()) stf(w, d, s1, s2.as_register());
- else stf(w, d, s1, s2.as_constant());
-}
-
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
- relocate(a.rspec(offset));
- if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
- else { stf(w, d, a.base(), a.disp() + offset); }
-}
-
inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
@@ -285,46 +152,6 @@
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
-inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
-
-#ifdef ASSERT
-// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
-#endif
-
-inline void Assembler::stb(Register d, const Address& a, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
- else { stb(d, a.base(), a.disp() + offset); }
-}
-inline void Assembler::sth(Register d, const Address& a, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
- else { sth(d, a.base(), a.disp() + offset); }
-}
-inline void Assembler::stw(Register d, const Address& a, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
- else { stw(d, a.base(), a.disp() + offset); }
-}
-inline void Assembler::st( Register d, const Address& a, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
- else { st( d, a.base(), a.disp() + offset); }
-}
-inline void Assembler::std(Register d, const Address& a, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
- else { std(d, a.base(), a.disp() + offset); }
-}
-inline void Assembler::stx(Register d, const Address& a, int offset) {
- if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
- else { stx(d, a.base(), a.disp() + offset); }
-}
-
-inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
-inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
-inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
-inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
-inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
-inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
-
// v8 p 99
inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -336,561 +163,9 @@
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
- if (s2.is_register()) sub(s1, s2.as_register(), d);
- else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
- if (offset != 0) sub(d, offset, d);
-}
-
// pp 231
inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::swap( Address& a, Register d, int offset ) {
- relocate(a.rspec(offset));
- if (a.has_index()) { assert(offset == 0, ""); swap( a.base(), a.index(), d ); }
- else { swap( a.base(), a.disp() + offset, d ); }
-}
-
-
-// Use the right loads/stores for the platform
-inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
- Assembler::ldx(s1, s2, d);
-#else
- Assembler::ld( s1, s2, d);
-#endif
-}
-
-inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
- Assembler::ldx(s1, simm13a, d);
-#else
- Assembler::ld( s1, simm13a, d);
-#endif
-}
-
-#ifdef ASSERT
-// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
- ld_ptr(s1, in_bytes(simm13a), d);
-}
-#endif
-
-inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
- Assembler::ldx(s1, s2, d);
-#else
- Assembler::ld( s1, s2, d);
-#endif
-}
-
-inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
-#ifdef _LP64
- Assembler::ldx(a, d, offset);
-#else
- Assembler::ld( a, d, offset);
-#endif
-}
-
-inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
- Assembler::stx(d, s1, s2);
-#else
- Assembler::st( d, s1, s2);
-#endif
-}
-
-inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
- Assembler::stx(d, s1, simm13a);
-#else
- Assembler::st( d, s1, simm13a);
-#endif
-}
-
-#ifdef ASSERT
-// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
- st_ptr(d, s1, in_bytes(simm13a));
-}
-#endif
-
-inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
- Assembler::stx(d, s1, s2);
-#else
- Assembler::st( d, s1, s2);
-#endif
-}
-
-inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
-#ifdef _LP64
- Assembler::stx(d, a, offset);
-#else
- Assembler::st( d, a, offset);
-#endif
-}
-
-// Use the right loads/stores for the platform
-inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
-#ifdef _LP64
- Assembler::ldx(s1, s2, d);
-#else
- Assembler::ldd(s1, s2, d);
-#endif
-}
-
-inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
- Assembler::ldx(s1, simm13a, d);
-#else
- Assembler::ldd(s1, simm13a, d);
-#endif
-}
-
-inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
- Assembler::ldx(s1, s2, d);
-#else
- Assembler::ldd(s1, s2, d);
-#endif
-}
-
-inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
-#ifdef _LP64
- Assembler::ldx(a, d, offset);
-#else
- Assembler::ldd(a, d, offset);
-#endif
-}
-
-inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
- Assembler::stx(d, s1, s2);
-#else
- Assembler::std(d, s1, s2);
-#endif
-}
-
-inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
- Assembler::stx(d, s1, simm13a);
-#else
- Assembler::std(d, s1, simm13a);
-#endif
-}
-
-inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
- Assembler::stx(d, s1, s2);
-#else
- Assembler::std(d, s1, s2);
-#endif
-}
-
-inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
-#ifdef _LP64
- Assembler::stx(d, a, offset);
-#else
- Assembler::std(d, a, offset);
-#endif
-}
-
-// Functions for isolating 64 bit shifts for LP64
-
-inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
- Assembler::sllx(s1, s2, d);
-#else
- Assembler::sll( s1, s2, d);
-#endif
-}
-
-inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
-#ifdef _LP64
- Assembler::sllx(s1, imm6a, d);
-#else
- Assembler::sll( s1, imm6a, d);
-#endif
-}
-
-inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
- Assembler::srlx(s1, s2, d);
-#else
- Assembler::srl( s1, s2, d);
-#endif
-}
-
-inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
-#ifdef _LP64
- Assembler::srlx(s1, imm6a, d);
-#else
- Assembler::srl( s1, imm6a, d);
-#endif
-}
-
-inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
- if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
- else sll_ptr(s1, s2.as_constant(), d);
-}
-
-// Use the right branch for the platform
-
-inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
- if (VM_Version::v9_instructions_work())
- Assembler::bp(c, a, icc, p, d, rt);
- else
- Assembler::br(c, a, d, rt);
-}
-
-inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
- br(c, a, p, target(L));
-}
-
-
-// Branch that tests either xcc or icc depending on the
-// architecture compiled (LP64 or not)
-inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-#ifdef _LP64
- Assembler::bp(c, a, xcc, p, d, rt);
-#else
- MacroAssembler::br(c, a, p, d, rt);
-#endif
-}
-
-inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
- brx(c, a, p, target(L));
-}
-
-inline void MacroAssembler::ba( Label& L ) {
- br(always, false, pt, L);
-}
-
-// Warning: V9 only functions
-inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
- Assembler::bp(c, a, cc, p, d, rt);
-}
-
-inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
- Assembler::bp(c, a, cc, p, L);
-}
-
-inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
- if (VM_Version::v9_instructions_work())
- fbp(c, a, fcc0, p, d, rt);
- else
- Assembler::fb(c, a, d, rt);
-}
-
-inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
- fb(c, a, p, target(L));
-}
-
-inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
- Assembler::fbp(c, a, cc, p, d, rt);
-}
-
-inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
- Assembler::fbp(c, a, cc, p, L);
-}
-
-inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
-inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
-
-inline bool MacroAssembler::is_far_target(address d) {
- if (ForceUnreachable) {
- // References outside the code cache should be treated as far
- return d < CodeCache::low_bound() || d > CodeCache::high_bound();
- }
- return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
-}
-
-// Call with a check to see if we need to deal with the added
-// expense of relocation and if we overflow the displacement
-// of the quick call instruction.
-inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
-#ifdef _LP64
- intptr_t disp;
- // NULL is ok because it will be relocated later.
- // Must change NULL to a reachable address in order to
- // pass asserts here and in wdisp.
- if ( d == NULL )
- d = pc();
-
- // Is this address within range of the call instruction?
- // If not, use the expensive instruction sequence
- if (is_far_target(d)) {
- relocate(rt);
- AddressLiteral dest(d);
- jumpl_to(dest, O7, O7);
- } else {
- Assembler::call(d, rt);
- }
-#else
- Assembler::call( d, rt );
-#endif
-}
-
-inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
- MacroAssembler::call( target(L), rt);
-}
-
-
-
-inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
-inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
-
-// prefetch instruction
-inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
- if (VM_Version::v9_instructions_work())
- Assembler::bp( never, true, xcc, pt, d, rt );
-}
-inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
-
-
-// clobbers o7 on V8!!
-// returns delta from gotten pc to addr after
-inline int MacroAssembler::get_pc( Register d ) {
- int x = offset();
- if (VM_Version::v9_instructions_work())
- rdpc(d);
- else {
- Label lbl;
- Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
- if (d == O7) delayed()->nop();
- else delayed()->mov(O7, d);
- bind(lbl);
- }
- return offset() - x;
-}
-
-
-// Note: All MacroAssembler::set_foo functions are defined out-of-line.
-
-
-// Loads the current PC of the following instruction as an immediate value in
-// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
-inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
- intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
-#ifdef _LP64
- Unimplemented();
-#else
- Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
- Assembler::add(reg,thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
-#endif
- return thepc;
-}
-
-
-inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
- assert_not_delayed();
- if (ForceUnreachable) {
- patchable_sethi(addrlit, d);
- } else {
- sethi(addrlit, d);
- }
- ld(d, addrlit.low10() + offset, d);
-}
-
-
-inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
- assert_not_delayed();
- if (ForceUnreachable) {
- patchable_sethi(addrlit, d);
- } else {
- sethi(addrlit, d);
- }
- ldub(d, addrlit.low10() + offset, d);
-}
-
-
-inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
- assert_not_delayed();
- if (ForceUnreachable) {
- patchable_sethi(addrlit, d);
- } else {
- sethi(addrlit, d);
- }
- ld_ptr(d, addrlit.low10() + offset, d);
-}
-
-
-inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
- assert_not_delayed();
- if (ForceUnreachable) {
- patchable_sethi(addrlit, temp);
- } else {
- sethi(addrlit, temp);
- }
- st(s, temp, addrlit.low10() + offset);
-}
-
-
-inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
- assert_not_delayed();
- if (ForceUnreachable) {
- patchable_sethi(addrlit, temp);
- } else {
- sethi(addrlit, temp);
- }
- st_ptr(s, temp, addrlit.low10() + offset);
-}
-
-
-// This code sequence is relocatable to any address, even on LP64.
-inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
- assert_not_delayed();
- // Force fixed length sethi because NativeJump and NativeFarCall don't handle
- // variable length instruction streams.
- patchable_sethi(addrlit, temp);
- jmpl(temp, addrlit.low10() + offset, d);
-}
-
-
-inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
- jumpl_to(addrlit, temp, G0, offset);
-}
-
-
-inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
- int ld_offset, int jmp_offset) {
- assert_not_delayed();
- //sethi(al); // sethi is caller responsibility for this one
- ld_ptr(a, temp, ld_offset);
- jmp(temp, jmp_offset);
-}
-
-
-inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
- set_metadata(allocate_metadata_address(obj), d);
-}
-
-inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
- set_metadata(constant_metadata_address(obj), d);
-}
-
-inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
- assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
- set(obj_addr, d);
-}
-
-inline void MacroAssembler::set_oop(jobject obj, Register d) {
- set_oop(allocate_oop_address(obj), d);
-}
-
-
-inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
- set_oop(constant_oop_address(obj), d);
-}
-
-
-inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
- assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
- set(obj_addr, d);
-}
-
-
-inline void MacroAssembler::load_argument( Argument& a, Register d ) {
- if (a.is_register())
- mov(a.as_register(), d);
- else
- ld (a.as_address(), d);
-}
-
-inline void MacroAssembler::store_argument( Register s, Argument& a ) {
- if (a.is_register())
- mov(s, a.as_register());
- else
- st_ptr (s, a.as_address()); // ABI says everything is right justified.
-}
-
-inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
- if (a.is_register())
- mov(s, a.as_register());
- else
- st_ptr (s, a.as_address());
-}
-
-
-#ifdef _LP64
-inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
- if (a.is_float_register())
-// V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
- fmov(FloatRegisterImpl::S, s, a.as_float_register() );
- else
- // Floats are stored in the high half of the stack entry
- // The low half is undefined per the ABI.
- stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
-}
-
-inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
- if (a.is_float_register())
-// V9 ABI has D0, D2, D4 are used to pass instead of O0, O1, O2
- fmov(FloatRegisterImpl::D, s, a.as_double_register() );
- else
- stf(FloatRegisterImpl::D, s, a.as_address());
-}
-
-inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
- if (a.is_register())
- mov(s, a.as_register());
- else
- stx(s, a.as_address());
-}
-#endif
-
-inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
-inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
-inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); }
-inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }
-
-inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
-inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
-inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); }
-inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
-
-// returns if membar generates anything, obviously this code should mirror
-// membar below.
-inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
- if( !os::is_MP() ) return false; // Not needed on single CPU
- if( VM_Version::v9_instructions_work() ) {
- const Membar_mask_bits effective_mask =
- Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
- return (effective_mask != 0);
- } else {
- return true;
- }
-}
-
-inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
- // Uniprocessors do not need memory barriers
- if (!os::is_MP()) return;
- // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
- // 8.4.4.3, a.31 and a.50.
- if( VM_Version::v9_instructions_work() ) {
- // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
- // of the mmask subfield of const7a that does anything that isn't done
- // implicitly is StoreLoad.
- const Membar_mask_bits effective_mask =
- Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
- if ( effective_mask != 0 ) {
- Assembler::membar( effective_mask );
- }
- } else {
- // stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
- // do not issue the stbar because to my knowledge all v8 machines implement TSO,
- // which guarantees that all stores behave as if an stbar were issued just after
- // each one of them. On these machines, stbar ought to be a nop. There doesn't
- // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
- // it can't be specified by stbar, nor have I come up with a way to simulate it.
- //
- // Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
- // space. Put one here to be on the safe side.
- Assembler::ldstub(SP, 0, G0);
- }
-}
-
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
--- a/hotspot/src/cpu/sparc/vm/codeBuffer_sparc.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/codeBuffer_sparc.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,9 +31,4 @@
public:
void flush_bundle(bool start_new_bundle) {}
- // Heuristic for pre-packing the pt/pn bit of a predicted branch.
- bool is_backward_branch(Label& L) {
- return L.is_bound() && insts_end() <= locator_address(L.loc());
- }
-
#endif // CPU_SPARC_VM_CODEBUFFER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1048,7 +1048,6 @@
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
- const Address max_stack (G5_method, 0, in_bytes(Method::max_stack_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
// slop factor is two extra slots on the expression stack so that
@@ -1070,7 +1069,9 @@
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
} else {
- __ lduh(max_stack, Gtmp); // Full size expression stack
+ // Full size expression stack
+ __ ld_ptr(constMethod, Gtmp);
+ __ lduh(Gtmp, in_bytes(ConstMethod::max_stack_offset()), Gtmp);
}
__ add(Gtmp, fixed_size, Gtmp); // plus the fixed portion
@@ -1206,7 +1207,9 @@
__ sub(O2, wordSize, O2); // prepush
__ st_ptr(O2, XXX_STATE(_stack)); // PREPUSH
- __ lduh(max_stack, O3); // Full size expression stack
+ // Full size expression stack
+ __ ld_ptr(constMethod, O3);
+ __ lduh(O3, in_bytes(ConstMethod::max_stack_offset()), O3);
guarantee(!EnableInvokeDynamic, "no support yet for java.lang.invoke.MethodHandle"); //6815692
//6815692//if (EnableInvokeDynamic)
//6815692// __ inc(O3, Method::extra_stack_entries());
@@ -1539,7 +1542,6 @@
const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
- const Address max_stack (G5_method, 0, in_bytes(Method::max_stack_offset()));
const Address size_of_locals (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
address entry_point = __ pc();
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -648,7 +648,7 @@
Method* m = *interpreter_frame_method_addr();
// validate the method we'd find in this potential sender
- if (!Universe::heap()->is_valid_method(m)) return false;
+ if (!m->is_valid_method()) return false;
// stack frames shouldn't be much larger than max_stack elements
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -204,25 +204,6 @@
intptr_t* out_register_addr(Register reg) const {
return younger_sp_addr_at(reg->after_save()->sp_offset_in_saved_window());
}
- intptr_t* memory_param_addr(int param_ix, bool is_in) const {
- int offset = callee_register_argument_save_area_sp_offset + param_ix;
- if (is_in)
- return fp_addr_at(offset);
- else
- return sp_addr_at(offset);
- }
- intptr_t* param_addr(int param_ix, bool is_in) const {
- if (param_ix >= callee_register_argument_save_area_words)
- return memory_param_addr(param_ix, is_in);
- else if (is_in)
- return register_addr(Argument(param_ix, true).as_register());
- else {
- // the registers are stored in the next younger frame
- // %%% is this really necessary?
- ShouldNotReachHere();
- return NULL;
- }
- }
// Interpreter frames
@@ -269,12 +250,8 @@
#ifndef CC_INTERP
// where Lmonitors is saved:
- BasicObjectLock** interpreter_frame_monitors_addr() const {
- return (BasicObjectLock**) sp_addr_at(Lmonitors->sp_offset_in_saved_window());
- }
- intptr_t** interpreter_frame_esp_addr() const {
- return (intptr_t**)sp_addr_at(Lesp->sp_offset_in_saved_window());
- }
+ inline BasicObjectLock** interpreter_frame_monitors_addr() const;
+ inline intptr_t** interpreter_frame_esp_addr() const;
inline void interpreter_frame_set_tos_address(intptr_t* x);
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,8 @@
#ifndef CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
#define CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
+#include "asm/macroAssembler.hpp"
+
// Inline functions for SPARC frames:
// Constructors
@@ -185,6 +187,13 @@
return *interpreter_frame_esp_addr() + 1;
}
+inline BasicObjectLock** frame::interpreter_frame_monitors_addr() const {
+ return (BasicObjectLock**) sp_addr_at(Lmonitors->sp_offset_in_saved_window());
+}
+inline intptr_t** frame::interpreter_frame_esp_addr() const {
+ return (intptr_t**)sp_addr_at(Lesp->sp_offset_in_saved_window());
+}
+
inline void frame::interpreter_frame_set_tos_address( intptr_t* x ) {
*interpreter_frame_esp_addr() = x - 1;
}
--- a/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
--- a/hotspot/src/cpu/sparc/vm/icache_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/icache_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "runtime/icache.hpp"
#define __ _masm->
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -36,12 +36,7 @@
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
#ifndef CC_INTERP
#ifndef FAST_DISPATCH
@@ -523,7 +518,8 @@
delayed()->nop();
// Compute max expression stack+register save area
- lduh(Lmethod, in_bytes(Method::max_stack_offset()), Gframe_size); // Load max stack.
+ ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
+ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack.
add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
//
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
#define CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assembler with interpreter-specific macros
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
--- a/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,4610 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
+#include "compiler/disassembler.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#endif
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#define STOP(error) stop(error)
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#define STOP(error) block_comment(error); stop(error)
+#endif
+
+// Convert the raw encoding form into the form expected by the
+// constructor for Address.
+Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
+ assert(scale == 0, "not supported");
+ RelocationHolder rspec;
+ if (disp_reloc != relocInfo::none) {
+ rspec = Relocation::spec_simple(disp_reloc);
+ }
+
+ Register rindex = as_Register(index);
+ if (rindex != G0) {
+ Address madr(as_Register(base), rindex);
+ madr._rspec = rspec;
+ return madr;
+ } else {
+ Address madr(as_Register(base), disp);
+ madr._rspec = rspec;
+ return madr;
+ }
+}
+
+Address Argument::address_in_frame() const {
+ // Warning: In LP64 mode disp will occupy more than 10 bits, but
+ // op codes such as ld or ldx only access disp() to get
+ // their simm13 argument.
+ int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
+ if (is_in())
+ return Address(FP, disp); // In argument.
+ else
+ return Address(SP, disp); // Out argument.
+}
+
+static const char* argumentNames[][2] = {
+ {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
+ {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
+ {"A(n>9)","P(n>9)"}
+};
+
+const char* Argument::name() const {
+ int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
+ int num = number();
+ if (num >= nofArgs) num = nofArgs - 1;
+ return argumentNames[num][is_in() ? 1 : 0];
+}
+
+#ifdef ASSERT
+// On RISC, there's no benefit to verifying instruction boundaries.
+bool AbstractAssembler::pd_check_instruction_mark() { return false; }
+#endif
+
+
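+// Decode the opcode of the given instruction word and print its mnemonic to the tty.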
+void MacroAssembler::print_instruction(int inst) {
+ const char* s;
+ switch (inv_op(inst)) {
+ default: s = "????"; break;
+ case call_op: s = "call"; break;
+ case branch_op:
+ switch (inv_op2(inst)) {
+ case fb_op2: s = "fb"; break;
+ case fbp_op2: s = "fbp"; break;
+ case br_op2: s = "br"; break;
+ case bp_op2: s = "bp"; break;
+ case cb_op2: s = "cb"; break;
+ case bpr_op2: {
+ if (is_cbcond(inst)) {
+ s = is_cxb(inst) ? "cxb" : "cwb";
+ } else {
+ s = "bpr";
+ }
+ break;
+ }
+ default: s = "????"; break;
+ }
+ }
+ ::tty->print("%s", s);
+}
+
+
+// Patch instruction inst at offset inst_pos to refer to dest_pos
+// and return the resulting instruction.
+// We should have pcs, not offsets, but since all is relative, it will work out
+// OK.
+int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
+ int m; // mask for displacement field
+ int v; // new value for displacement field
+ const int word_aligned_ones = -4;
+ switch (inv_op(inst)) {
+ default: ShouldNotReachHere();
+ case call_op: m = wdisp(word_aligned_ones, 0, 30); v = wdisp(dest_pos, inst_pos, 30); break;
+ case branch_op:
+ switch (inv_op2(inst)) {
+ case fbp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
+ case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
+ case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
+ case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
+ case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
+ case bpr_op2: {
+ if (is_cbcond(inst)) {
+ m = wdisp10(word_aligned_ones, 0);
+ v = wdisp10(dest_pos, inst_pos);
+ } else {
+ m = wdisp16(word_aligned_ones, 0);
+ v = wdisp16(dest_pos, inst_pos);
+ }
+ break;
+ }
+ default: ShouldNotReachHere();
+ }
+ }
+ return inst & ~m | v;
+}
+
+// Return the offset of the branch destination of instruction inst
+// at offset pos.
+// Should have pcs, but since all is relative, it works out.
+int MacroAssembler::branch_destination(int inst, int pos) {
+ int r;
+ switch (inv_op(inst)) {
+ default: ShouldNotReachHere();
+ case call_op: r = inv_wdisp(inst, pos, 30); break;
+ case branch_op:
+ switch (inv_op2(inst)) {
+ case fbp_op2: r = inv_wdisp( inst, pos, 19); break;
+ case bp_op2: r = inv_wdisp( inst, pos, 19); break;
+ case fb_op2: r = inv_wdisp( inst, pos, 22); break;
+ case br_op2: r = inv_wdisp( inst, pos, 22); break;
+ case cb_op2: r = inv_wdisp( inst, pos, 22); break;
+ case bpr_op2: {
+ if (is_cbcond(inst)) {
+ r = inv_wdisp10(inst, pos);
+ } else {
+ r = inv_wdisp16(inst, pos);
+ }
+ break;
+ }
+ default: ShouldNotReachHere();
+ }
+ }
+ return r;
+}
+
+void MacroAssembler::null_check(Register reg, int offset) {
+ if (needs_explicit_null_check((intptr_t)offset)) {
+ // provoke OS NULL exception if reg = NULL by
+ // accessing M[reg] w/o changing any registers
+ ld_ptr(reg, 0, G0);
+ }
+ else {
+ // nothing to do, (later) access of M[reg + offset]
+ // will provoke OS NULL exception if reg = NULL
+ }
+}
+
+// Ring buffer jumps
+
+#ifndef PRODUCT
+void MacroAssembler::ret( bool trace ) {
+ if (trace) {
+ mov(I7, O7); // traceable register
+ JMP(O7, 2 * BytesPerInstWord);
+ } else {
+ jmpl( I7, 2 * BytesPerInstWord, G0 );
+ }
+}
+
+void MacroAssembler::retl( bool trace ) {
+ if (trace) JMP(O7, 2 * BytesPerInstWord);
+ else jmpl( O7, 2 * BytesPerInstWord, G0 );
+}
+#endif /* PRODUCT */
+
+
+void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
+ assert_not_delayed();
+ // This can only be traceable if r1 & r2 are visible after a window save
+ if (TraceJumps) {
+#ifndef PRODUCT
+ save_frame(0);
+ verify_thread();
+ ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
+ add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
+ sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
+ add(O2, O1, O1);
+
+ add(r1->after_save(), r2->after_save(), O2);
+ set((intptr_t)file, O3);
+ set(line, O4);
+ Label L;
+ // get nearby pc, store jmp target
+ call(L, relocInfo::none); // No relocation for call to pc+0x8
+ delayed()->st(O2, O1, 0);
+ bind(L);
+
+ // store nearby pc
+ st(O7, O1, sizeof(intptr_t));
+ // store file
+ st(O3, O1, 2*sizeof(intptr_t));
+ // store line
+ st(O4, O1, 3*sizeof(intptr_t));
+ add(O0, 1, O0);
+ and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
+ st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
+ restore();
+#endif /* PRODUCT */
+ }
+ jmpl(r1, r2, G0);
+}
+void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
+ assert_not_delayed();
+ // This can only be traceable if r1 is visible after a window save
+ if (TraceJumps) {
+#ifndef PRODUCT
+ save_frame(0);
+ verify_thread();
+ ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
+ add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
+ sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
+ add(O2, O1, O1);
+
+ add(r1->after_save(), offset, O2);
+ set((intptr_t)file, O3);
+ set(line, O4);
+ Label L;
+ // get nearby pc, store jmp target
+ call(L, relocInfo::none); // No relocation for call to pc+0x8
+ delayed()->st(O2, O1, 0);
+ bind(L);
+
+ // store nearby pc
+ st(O7, O1, sizeof(intptr_t));
+ // store file
+ st(O3, O1, 2*sizeof(intptr_t));
+ // store line
+ st(O4, O1, 3*sizeof(intptr_t));
+ add(O0, 1, O0);
+ and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
+ st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
+ restore();
+#endif /* PRODUCT */
+ }
+ jmp(r1, offset);
+}
+
+// This code sequence is relocatable to any address, even on LP64.
+void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
+ assert_not_delayed();
+ // Force fixed length sethi because NativeJump and NativeFarCall don't handle
+ // variable length instruction streams.
+ patchable_sethi(addrlit, temp);
+ Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
+ if (TraceJumps) {
+#ifndef PRODUCT
+ // Must do the add here so relocation can find the remainder of the
+ // value to be relocated.
+ add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
+ save_frame(0);
+ verify_thread();
+ ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
+ add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
+ sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
+ add(O2, O1, O1);
+
+ set((intptr_t)file, O3);
+ set(line, O4);
+ Label L;
+
+ // get nearby pc, store jmp target
+ call(L, relocInfo::none); // No relocation for call to pc+0x8
+ delayed()->st(a.base()->after_save(), O1, 0);
+ bind(L);
+
+ // store nearby pc
+ st(O7, O1, sizeof(intptr_t));
+ // store file
+ st(O3, O1, 2*sizeof(intptr_t));
+ // store line
+ st(O4, O1, 3*sizeof(intptr_t));
+ add(O0, 1, O0);
+ and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
+ st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
+ restore();
+ jmpl(a.base(), G0, d);
+#else
+ jmpl(a.base(), a.disp(), d);
+#endif /* PRODUCT */
+ } else {
+ jmpl(a.base(), a.disp(), d);
+ }
+}
+
+void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
+ jumpl(addrlit, temp, G0, offset, file, line);
+}
+
+
+// Conditional breakpoint (for assertion checks in assembly code)
+void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
+ trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
+}
+
+// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
+void MacroAssembler::breakpoint_trap() {
+ trap(ST_RESERVED_FOR_USER_0);
+}
+
+// flush windows (except current) using flushw instruction if avail.
+void MacroAssembler::flush_windows() {
+ if (VM_Version::v9_instructions_work()) flushw();
+ else flush_windows_trap();
+}
+
+// Write the serialization page so the VM thread can do a pseudo remote membar.
+// We use the current thread pointer to calculate a thread-specific
+// offset to write to within the page. This minimizes bus traffic
+// due to cache line collision.
+void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
+ srl(thread, os::get_serialize_page_shift_count(), tmp2);
+ if (Assembler::is_simm13(os::vm_page_size())) {
+ and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
+ }
+ else {
+ set((os::vm_page_size() - sizeof(int)), tmp1);
+ and3(tmp2, tmp1, tmp2);
+ }
+ set(os::get_memory_serialize_page(), tmp1);
+ st(G0, tmp1, tmp2);
+}
+
+
+
+void MacroAssembler::enter() {
+ Unimplemented();
+}
+
+void MacroAssembler::leave() {
+ Unimplemented();
+}
+
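+// Multiply: use the 64-bit V9 mulx when available, otherwise fall back to the V8 smul.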
+void MacroAssembler::mult(Register s1, Register s2, Register d) {
+ if(VM_Version::v9_instructions_work()) {
+ mulx (s1, s2, d);
+ } else {
+ smul (s1, s2, d);
+ }
+}
+
+void MacroAssembler::mult(Register s1, int simm13a, Register d) {
+ if(VM_Version::v9_instructions_work()) {
+ mulx (s1, simm13a, d);
+ } else {
+ smul (s1, simm13a, d);
+ }
+}
+
+
+#ifdef ASSERT
+void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
+ const Register s1 = G3_scratch;
+ const Register s2 = G4_scratch;
+ Label get_psr_test;
+ // Get the condition codes the V8 way.
+ read_ccr_trap(s1);
+ mov(ccr_save, s2);
+ // This is a test of V8 which has icc but not xcc
+ // so mask off the xcc bits
+ and3(s2, 0xf, s2);
+ // Compare condition codes from the V8 and V9 ways.
+ subcc(s2, s1, G0);
+ br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
+ delayed()->breakpoint_trap();
+ bind(get_psr_test);
+}
+
+void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
+ const Register s1 = G3_scratch;
+ const Register s2 = G4_scratch;
+ Label set_psr_test;
+ // Write out the saved condition codes the V8 way
+ write_ccr_trap(ccr_save, s1, s2);
+ // Read back the condition codes using the V9 instruction
+ rdccr(s1);
+ mov(ccr_save, s2);
+ // This is a test of V8 which has icc but not xcc
+ // so mask off the xcc bits
+ and3(s2, 0xf, s2);
+ and3(s1, 0xf, s1);
+ // Compare the V8 way with the V9 way.
+ subcc(s2, s1, G0);
+ br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
+ delayed()->breakpoint_trap();
+ bind(set_psr_test);
+}
+#else
+#define read_ccr_v8_assert(x)
+#define write_ccr_v8_assert(x)
+#endif // ASSERT
+
+void MacroAssembler::read_ccr(Register ccr_save) {
+ if (VM_Version::v9_instructions_work()) {
+ rdccr(ccr_save);
+ // Test code sequence used on V8. Do not move above rdccr.
+ read_ccr_v8_assert(ccr_save);
+ } else {
+ read_ccr_trap(ccr_save);
+ }
+}
+
+void MacroAssembler::write_ccr(Register ccr_save) {
+ if (VM_Version::v9_instructions_work()) {
+ // Test code sequence used on V8. Do not move below wrccr.
+ write_ccr_v8_assert(ccr_save);
+ wrccr(ccr_save);
+ } else {
+ const Register temp_reg1 = G3_scratch;
+ const Register temp_reg2 = G4_scratch;
+ write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
+ }
+}
+
+
+// Calls to C land
+
+#ifdef ASSERT
+// a hook for debugging
+static Thread* reinitialize_thread() {
+ return ThreadLocalStorage::thread();
+}
+#else
+#define reinitialize_thread ThreadLocalStorage::thread
+#endif
+
+#ifdef ASSERT
+address last_get_thread = NULL;
+#endif
+
+// call this when G2_thread is not known to be valid
+void MacroAssembler::get_thread() {
+ save_frame(0); // to avoid clobbering O0
+ mov(G1, L0); // avoid clobbering G1
+ mov(G5_method, L1); // avoid clobbering G5
+ mov(G3, L2); // avoid clobbering G3 also
+ mov(G4, L5); // avoid clobbering G4
+#ifdef ASSERT
+ AddressLiteral last_get_thread_addrlit(&last_get_thread);
+ set(last_get_thread_addrlit, L3);
+ inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
+ st_ptr(L4, L3, 0);
+#endif
+ call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
+ delayed()->nop();
+ mov(L0, G1);
+ mov(L1, G5_method);
+ mov(L2, G3);
+ mov(L5, G4);
+ restore(O0, 0, G2_thread);
+}
+
+static Thread* verify_thread_subroutine(Thread* gthread_value) {
+ Thread* correct_value = ThreadLocalStorage::thread();
+ guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
+ return correct_value;
+}
+
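+// Under +VerifyThread, call out to check that G2_thread really holds the current thread;
+// the G registers are preserved across the check.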
+void MacroAssembler::verify_thread() {
+ if (VerifyThread) {
+ // NOTE: this chops off the heads of the 64-bit O registers.
+#ifdef CC_INTERP
+ save_frame(0);
+#else
+ // make sure G2_thread contains the right value
+ save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
+ mov(G1, L1); // avoid clobbering G1
+ // G2 saved below
+ mov(G3, L3); // avoid clobbering G3
+ mov(G4, L4); // avoid clobbering G4
+ mov(G5_method, L5); // avoid clobbering G5_method
+#endif /* CC_INTERP */
+#if defined(COMPILER2) && !defined(_LP64)
+ // Save & restore possible 64-bit Long arguments in G-regs
+ srlx(G1,32,L0);
+ srlx(G4,32,L6);
+#endif
+ call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
+ delayed()->mov(G2_thread, O0);
+
+ mov(L1, G1); // Restore G1
+ // G2 restored below
+ mov(L3, G3); // restore G3
+ mov(L4, G4); // restore G4
+ mov(L5, G5_method); // restore G5_method
+#if defined(COMPILER2) && !defined(_LP64)
+ // Save & restore possible 64-bit Long arguments in G-regs
+ sllx(L0,32,G2); // Move old high G1 bits high in G2
+ srl(G1, 0,G1); // Clear current high G1 bits
+ or3 (G1,G2,G1); // Recover 64-bit G1
+ sllx(L6,32,G2); // Move old high G4 bits high in G2
+ srl(G4, 0,G4); // Clear current high G4 bits
+ or3 (G4,G2,G4); // Recover 64-bit G4
+#endif
+ restore(O0, 0, G2_thread);
+ }
+}
+
+
+void MacroAssembler::save_thread(const Register thread_cache) {
+ verify_thread();
+ if (thread_cache->is_valid()) {
+ assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
+ mov(G2_thread, thread_cache);
+ }
+ if (VerifyThread) {
+ // smash G2_thread, as if the VM were about to anyway
+ set(0x67676767, G2_thread);
+ }
+}
+
+
+void MacroAssembler::restore_thread(const Register thread_cache) {
+ if (thread_cache->is_valid()) {
+ assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
+ mov(thread_cache, G2_thread);
+ verify_thread();
+ } else {
+ // do it the slow way
+ get_thread();
+ }
+}
+
+
+// %%% maybe get rid of [re]set_last_Java_frame
+void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
+ assert_not_delayed();
+ Address flags(G2_thread, JavaThread::frame_anchor_offset() +
+ JavaFrameAnchor::flags_offset());
+ Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());
+
+ // Always set last_Java_pc and flags first because once last_Java_sp is visible
+ // has_last_Java_frame is true and users will look at the rest of the fields.
+ // (Note: flags should always be zero before we get here so doesn't need to be set.)
+
+#ifdef ASSERT
+ // Verify that last_Java_pc was zeroed on return to Java
+ Label PcOk;
+ save_frame(0); // to avoid clobbering O0
+ ld_ptr(pc_addr, L0);
+ br_null_short(L0, Assembler::pt, PcOk);
+ STOP("last_Java_pc not zeroed before leaving Java");
+ bind(PcOk);
+
+ // Verify that flags was zeroed on return to Java
+ Label FlagsOk;
+ ld(flags, L0);
+ tst(L0);
+ br(Assembler::zero, false, Assembler::pt, FlagsOk);
+ delayed() -> restore();
+ STOP("flags not zeroed before leaving Java");
+ bind(FlagsOk);
+#endif /* ASSERT */
+ //
+ // When returning from calling out from Java mode, the frame anchor's last_Java_pc
+ // will always be set to NULL. It is set here so that if we are doing a call to
+ // native (not VM) code we capture the known pc and don't have to rely on the
+ // native call having a standard frame linkage where we can find the pc.
+
+ if (last_Java_pc->is_valid()) {
+ st_ptr(last_Java_pc, pc_addr);
+ }
+
+#ifdef _LP64
+#ifdef ASSERT
+ // Make sure that we have an odd stack
+ Label StackOk;
+ andcc(last_java_sp, 0x01, G0);
+ br(Assembler::notZero, false, Assembler::pt, StackOk);
+ delayed()->nop();
+ STOP("Stack Not Biased in set_last_Java_frame");
+ bind(StackOk);
+#endif // ASSERT
+ assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
+ add( last_java_sp, STACK_BIAS, G4_scratch );
+ st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
+#else
+ st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
+#endif // _LP64
+}
+
+void MacroAssembler::reset_last_Java_frame(void) {
+ assert_not_delayed();
+
+ Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
+ Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
+ Address flags (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
+
+#ifdef ASSERT
+ // check that it WAS previously set
+#ifdef CC_INTERP
+ save_frame(0);
+#else
+ save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
+#endif /* CC_INTERP */
+ ld_ptr(sp_addr, L0);
+ tst(L0);
+ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
+ restore();
+#endif // ASSERT
+
+ st_ptr(G0, sp_addr);
+ // Always return last_Java_pc to zero
+ st_ptr(G0, pc_addr);
+ // Always null flags after return to Java
+ st(G0, flags);
+}
+
+
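+// Shared helper for the call_VM variants: record the last Java frame, pass the current
+// thread in O0, make the runtime call, then restore VM state and optionally check for
+// pending exceptions.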
+void MacroAssembler::call_VM_base(
+ Register oop_result,
+ Register thread_cache,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions)
+{
+ assert_not_delayed();
+
+ // determine last_java_sp register
+ if (!last_java_sp->is_valid()) {
+ last_java_sp = SP;
+ }
+ // debugging support
+ assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
+
+ // 64-bit last_java_sp is biased!
+ set_last_Java_frame(last_java_sp, noreg);
+ if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
+ save_thread(thread_cache);
+ // do the call
+ call(entry_point, relocInfo::runtime_call_type);
+ if (!VerifyThread)
+ delayed()->mov(G2_thread, O0); // pass thread as first argument
+ else
+ delayed()->nop(); // (thread already passed)
+ restore_thread(thread_cache);
+ reset_last_Java_frame();
+
+ // check for pending exceptions. use Gtemp as scratch register.
+ if (check_exceptions) {
+ check_and_forward_exception(Gtemp);
+ }
+
+#ifdef ASSERT
+ set(badHeapWordVal, G3);
+ set(badHeapWordVal, G4);
+ set(badHeapWordVal, G5);
+#endif
+
+ // get oop result if there is one and reset the value in the thread
+ if (oop_result->is_valid()) {
+ get_vm_result(oop_result);
+ }
+}
+
+void MacroAssembler::check_and_forward_exception(Register scratch_reg)
+{
+ Label L;
+
+ check_and_handle_popframe(scratch_reg);
+ check_and_handle_earlyret(scratch_reg);
+
+ Address exception_addr(G2_thread, Thread::pending_exception_offset());
+ ld_ptr(exception_addr, scratch_reg);
+ br_null_short(scratch_reg, pt, L);
+ // we use O7 linkage so that forward_exception_entry has the issuing PC
+ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
+ delayed()->nop();
+ bind(L);
+}
+
+
+void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
+}
+
+
+void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
+ call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
+ // O0 is reserved for the thread
+ mov(arg_1, O1);
+ call_VM(oop_result, entry_point, 1, check_exceptions);
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
+ // O0 is reserved for the thread
+ mov(arg_1, O1);
+ mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
+ call_VM(oop_result, entry_point, 2, check_exceptions);
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
+ // O0 is reserved for the thread
+ mov(arg_1, O1);
+ mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
+ mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
+ call_VM(oop_result, entry_point, 3, check_exceptions);
+}
+
+
+
+// Note: The following call_VM overloadings are useful when a "save"
+// has already been performed by a stub, and the last Java frame is
+// the previous one. In that case, last_java_sp must be passed as FP
+// instead of SP.
+
+
+void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
+ call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
+ // O0 is reserved for the thread
+ mov(arg_1, O1);
+ call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
+ // O0 is reserved for the thread
+ mov(arg_1, O1);
+ mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
+ call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
+}
+
+
+void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
+ // O0 is reserved for the thread
+ mov(arg_1, O1);
+ mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
+ mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
+ call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
+}
+
+
+
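+// Leaf call into the runtime: only the thread cache is saved and restored; no last
+// Java frame is recorded and no pending-exception check is made.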
+void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
+ assert_not_delayed();
+ save_thread(thread_cache);
+ // do the call
+ call(entry_point, relocInfo::runtime_call_type);
+ delayed()->nop();
+ restore_thread(thread_cache);
+#ifdef ASSERT
+ set(badHeapWordVal, G3);
+ set(badHeapWordVal, G4);
+ set(badHeapWordVal, G5);
+#endif
+}
+
+
+void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
+ call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
+}
+
+
+void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
+ mov(arg_1, O0);
+ call_VM_leaf(thread_cache, entry_point, 1);
+}
+
+
+void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
+ mov(arg_1, O0);
+ mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
+ call_VM_leaf(thread_cache, entry_point, 2);
+}
+
+
+void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
+ mov(arg_1, O0);
+ mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
+ mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
+ call_VM_leaf(thread_cache, entry_point, 3);
+}
+
+
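+// Fetch the oop result of a VM call from the thread and clear the thread-local slot.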
+void MacroAssembler::get_vm_result(Register oop_result) {
+ verify_thread();
+ Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
+ ld_ptr( vm_result_addr, oop_result);
+ st_ptr(G0, vm_result_addr);
+ verify_oop(oop_result);
+}
+
+
+void MacroAssembler::get_vm_result_2(Register metadata_result) {
+ verify_thread();
+ Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
+ ld_ptr(vm_result_addr_2, metadata_result);
+ st_ptr(G0, vm_result_addr_2);
+}
+
+
+// We require that C code which does not return a value in vm_result will
+// leave it undisturbed.
+void MacroAssembler::set_vm_result(Register oop_result) {
+ verify_thread();
+ Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
+ verify_oop(oop_result);
+
+# ifdef ASSERT
+ // Check that we are not overwriting any other oop.
+#ifdef CC_INTERP
+ save_frame(0);
+#else
+ save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod for -Xprof
+#endif /* CC_INTERP */
+ ld_ptr(vm_result_addr, L0);
+ tst(L0);
+ restore();
+ breakpoint_trap(notZero, Assembler::ptr_cc);
+ // }
+# endif
+
+ st_ptr(oop_result, vm_result_addr);
+}
+
+
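+// Inline cache call: load the non-oop placeholder into G5_inline_cache_reg and emit a
+// call marked with a virtual_call relocation.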
+void MacroAssembler::ic_call(address entry, bool emit_delay) {
+ RelocationHolder rspec = virtual_call_Relocation::spec(pc());
+ patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
+ relocate(rspec);
+ call(entry, relocInfo::none);
+ if (emit_delay) {
+ delayed()->nop();
+ }
+}
+
+
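+// Dirty the card covering obj: shift the address by the card shift and store a zero
+// byte into the card table at byte_map_base.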
+void MacroAssembler::card_table_write(jbyte* byte_map_base,
+ Register tmp, Register obj) {
+#ifdef _LP64
+ srlx(obj, CardTableModRefBS::card_shift, obj);
+#else
+ srl(obj, CardTableModRefBS::card_shift, obj);
+#endif
+ assert(tmp != obj, "need separate temp reg");
+ set((address) byte_map_base, tmp);
+ stb(G0, tmp, obj);
+}
+
+
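+// Emit a sethi-based sequence that loads the high bits of addrlit into d; on LP64 the
+// sequence is padded to a fixed length when the result must remain patchable.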
+void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
+ address save_pc;
+ int shiftcnt;
+#ifdef _LP64
+# ifdef CHECK_DELAY
+ assert_not_delayed((char*) "cannot put two instructions in delay slot");
+# endif
+ v9_dep();
+ save_pc = pc();
+
+ int msb32 = (int) (addrlit.value() >> 32);
+ int lsb32 = (int) (addrlit.value());
+
+ if (msb32 == 0 && lsb32 >= 0) {
+ Assembler::sethi(lsb32, d, addrlit.rspec());
+ }
+ else if (msb32 == -1) {
+ Assembler::sethi(~lsb32, d, addrlit.rspec());
+ xor3(d, ~low10(~0), d);
+ }
+ else {
+ Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
+ if (msb32 & 0x3ff) // Any bits?
+ or3(d, msb32 & 0x3ff, d); // msb 32-bits are now in lsb 32
+ if (lsb32 & 0xFFFFFC00) { // done?
+ if ((lsb32 >> 20) & 0xfff) { // Any bits set?
+ sllx(d, 12, d); // Make room for next 12 bits
+ or3(d, (lsb32 >> 20) & 0xfff, d); // Or in next 12
+ shiftcnt = 0; // We already shifted
+ }
+ else
+ shiftcnt = 12;
+ if ((lsb32 >> 10) & 0x3ff) {
+ sllx(d, shiftcnt + 10, d); // Make room for last 10 bits
+ or3(d, (lsb32 >> 10) & 0x3ff, d); // Or in next 10
+ shiftcnt = 0;
+ }
+ else
+ shiftcnt = 10;
+ sllx(d, shiftcnt + 10, d); // Shift leaving disp field 0'd
+ }
+ else
+ sllx(d, 32, d);
+ }
+ // Pad out the instruction sequence so it can be patched later.
+ if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
+ addrlit.rtype() != relocInfo::runtime_call_type)) {
+ while (pc() < (save_pc + (7 * BytesPerInstWord)))
+ nop();
+ }
+#else
+ Assembler::sethi(addrlit.value(), d, addrlit.rspec());
+#endif
+}
+
+
+void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
+ internal_sethi(addrlit, d, false);
+}
+
+
+void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
+ internal_sethi(addrlit, d, true);
+}
+
+
+int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
+#ifdef _LP64
+ if (worst_case) return 7;
+ intptr_t iaddr = (intptr_t) a;
+ int msb32 = (int) (iaddr >> 32);
+ int lsb32 = (int) (iaddr);
+ int count;
+ if (msb32 == 0 && lsb32 >= 0)
+ count = 1;
+ else if (msb32 == -1)
+ count = 2;
+ else {
+ count = 2;
+ if (msb32 & 0x3ff)
+ count++;
+ if (lsb32 & 0xFFFFFC00 ) {
+ if ((lsb32 >> 20) & 0xfff) count += 2;
+ if ((lsb32 >> 10) & 0x3ff) count += 2;
+ }
+ }
+ return count;
+#else
+ return 1;
+#endif
+}
+
+int MacroAssembler::worst_case_insts_for_set() {
+ return insts_for_sethi(NULL, true) + 1;
+}
+
+
+// Keep in sync with MacroAssembler::insts_for_internal_set
+void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
+ intptr_t value = addrlit.value();
+
+ if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
+ // can optimize
+ if (-4096 <= value && value <= 4095) {
+ or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
+ return;
+ }
+ if (inv_hi22(hi22(value)) == value) {
+ sethi(addrlit, d);
+ return;
+ }
+ }
+ assert_not_delayed((char*) "cannot put two instructions in delay slot");
+ internal_sethi(addrlit, d, ForceRelocatable);
+ if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
+ add(d, addrlit.low10(), d, addrlit.rspec());
+ }
+}
+
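+// Selection logic above, in C terms (illustrative sketch, non-relocatable case):
+//   if (-4096 <= value && value <= 4095)   -> or3(G0, value, d)   (1 instruction)
+//   else if value is a pure sethi pattern (low 10 bits zero, fits the 22-bit
+//        high field)                        -> sethi(value, d)     (1 instruction)
+//   else                                    -> internal_sethi sequence, plus an
+//                                              add of low10(value) if needed.
+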
+// Keep in sync with MacroAssembler::internal_set
+int MacroAssembler::insts_for_internal_set(intptr_t value) {
+ // can optimize
+ if (-4096 <= value && value <= 4095) {
+ return 1;
+ }
+ if (inv_hi22(hi22(value)) == value) {
+ return insts_for_sethi((address) value);
+ }
+ int count = insts_for_sethi((address) value);
+ AddressLiteral al(value);
+ if (al.low10() != 0) {
+ count++;
+ }
+ return count;
+}
+
+void MacroAssembler::set(const AddressLiteral& al, Register d) {
+ internal_set(al, d, false);
+}
+
+void MacroAssembler::set(intptr_t value, Register d) {
+ AddressLiteral al(value);
+ internal_set(al, d, false);
+}
+
+void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
+ AddressLiteral al(addr, rspec);
+ internal_set(al, d, false);
+}
+
+void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
+ internal_set(al, d, true);
+}
+
+void MacroAssembler::patchable_set(intptr_t value, Register d) {
+ AddressLiteral al(value);
+ internal_set(al, d, true);
+}
+
+
+void MacroAssembler::set64(jlong value, Register d, Register tmp) {
+ assert_not_delayed();
+ v9_dep();
+
+ int hi = (int)(value >> 32);
+ int lo = (int)(value & ~0);
+ // (Matcher::isSimpleConstant64 knows about the following optimizations.)
+ if (Assembler::is_simm13(lo) && value == lo) {
+ or3(G0, lo, d);
+ } else if (hi == 0) {
+ Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
+ if (low10(lo) != 0)
+ or3(d, low10(lo), d);
+ }
+ else if (hi == -1) {
+ Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
+ xor3(d, low10(lo) ^ ~low10(~0), d);
+ }
+ else if (lo == 0) {
+ if (Assembler::is_simm13(hi)) {
+ or3(G0, hi, d);
+ } else {
+ Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
+ if (low10(hi) != 0)
+ or3(d, low10(hi), d);
+ }
+ sllx(d, 32, d);
+ }
+ else {
+ Assembler::sethi(hi, tmp);
+ Assembler::sethi(lo, d); // macro assembler version sign-extends
+ if (low10(hi) != 0)
+ or3 (tmp, low10(hi), tmp);
+ if (low10(lo) != 0)
+ or3 ( d, low10(lo), d);
+ sllx(tmp, 32, tmp);
+ or3 (d, tmp, d);
+ }
+}
+
+int MacroAssembler::insts_for_set64(jlong value) {
+ v9_dep();
+
+ int hi = (int) (value >> 32);
+ int lo = (int) (value & ~0);
+ int count = 0;
+
+ // (Matcher::isSimpleConstant64 knows about the following optimizations.)
+ if (Assembler::is_simm13(lo) && value == lo) {
+ count++;
+ } else if (hi == 0) {
+ count++;
+ if (low10(lo) != 0)
+ count++;
+ }
+ else if (hi == -1) {
+ count += 2;
+ }
+ else if (lo == 0) {
+ if (Assembler::is_simm13(hi)) {
+ count++;
+ } else {
+ count++;
+ if (low10(hi) != 0)
+ count++;
+ }
+ count++;
+ }
+ else {
+ count += 2;
+ if (low10(hi) != 0)
+ count++;
+ if (low10(lo) != 0)
+ count++;
+ count += 2;
+ }
+ return count;
+}
+
+// Compute the size in bytes of a SPARC frame, given the
+// number of extraWords
+int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
+
+ int nWords = frame::memory_parameter_word_sp_offset;
+
+ nWords += extraWords;
+
+ if (nWords & 1) ++nWords; // round up to double-word
+
+ return nWords * BytesPerWord;
+}
+
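+// Example (illustrative, assuming frame::memory_parameter_word_sp_offset == 23;
+// the exact value is platform-dependent): with extraWords == 2, nWords = 25,
+// rounded up to 26, so the frame is 26 * BytesPerWord bytes.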
+
+// save_frame: given the number of "extra" words in the frame,
+// issue the appropriate save instruction (p 200, v8 manual)
+
+void MacroAssembler::save_frame(int extraWords) {
+ int delta = -total_frame_size_in_bytes(extraWords);
+ if (is_simm13(delta)) {
+ save(SP, delta, SP);
+ } else {
+ set(delta, G3_scratch);
+ save(SP, G3_scratch, SP);
+ }
+}
+
+
+void MacroAssembler::save_frame_c1(int size_in_bytes) {
+ if (is_simm13(-size_in_bytes)) {
+ save(SP, -size_in_bytes, SP);
+ } else {
+ set(-size_in_bytes, G3_scratch);
+ save(SP, G3_scratch, SP);
+ }
+}
+
+
+void MacroAssembler::save_frame_and_mov(int extraWords,
+ Register s1, Register d1,
+ Register s2, Register d2) {
+ assert_not_delayed();
+
+ // The trick here is to use precisely the same memory word
+ // that trap handlers also use to save the register.
+ // This word cannot be used for any other purpose, but
+ // it works fine to save the register's value, whether or not
+ // an interrupt flushes register windows at any given moment!
+ Address s1_addr;
+ if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
+ s1_addr = s1->address_in_saved_window();
+ st_ptr(s1, s1_addr);
+ }
+
+ Address s2_addr;
+ if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
+ s2_addr = s2->address_in_saved_window();
+ st_ptr(s2, s2_addr);
+ }
+
+ save_frame(extraWords);
+
+ if (s1_addr.base() == SP) {
+ ld_ptr(s1_addr.after_save(), d1);
+ } else if (s1->is_valid()) {
+ mov(s1->after_save(), d1);
+ }
+
+ if (s2_addr.base() == SP) {
+ ld_ptr(s2_addr.after_save(), d2);
+ } else if (s2->is_valid()) {
+ mov(s2->after_save(), d2);
+ }
+}
+
+
+AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
+ assert(oop_recorder() != NULL, "this assembler needs a Recorder");
+ int index = oop_recorder()->allocate_metadata_index(obj);
+ RelocationHolder rspec = metadata_Relocation::spec(index);
+ return AddressLiteral((address)obj, rspec);
+}
+
+AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
+ assert(oop_recorder() != NULL, "this assembler needs a Recorder");
+ int index = oop_recorder()->find_index(obj);
+ RelocationHolder rspec = metadata_Relocation::spec(index);
+ return AddressLiteral((address)obj, rspec);
+}
+
+
+AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
+ assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+ int oop_index = oop_recorder()->find_index(obj);
+ return AddressLiteral(obj, oop_Relocation::spec(oop_index));
+}
+
+void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
+ assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int oop_index = oop_recorder()->find_index(obj);
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+
+ assert_not_delayed();
+ // Relocation with special format (see relocInfo_sparc.hpp).
+ relocate(rspec, 1);
+ // Assembler::sethi(0x3fffff, d);
+ emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
+ // Don't add relocation for 'add'. Do patching during 'sethi' processing.
+ add(d, 0x3ff, d);
+
+}
+
+void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
+ assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ narrowOop encoded_k = oopDesc::encode_klass(k);
+
+ assert_not_delayed();
+ // Relocation with special format (see relocInfo_sparc.hpp).
+ relocate(rspec, 1);
+ // Assembler::sethi(encoded_k, d);
+ emit_long( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
+ // Don't add relocation for 'add'. Do patching during 'sethi' processing.
+ add(d, low10(encoded_k), d);
+
+}
+
+void MacroAssembler::align(int modulus) {
+ while (offset() % modulus != 0) nop();
+}
+
+
+void MacroAssembler::safepoint() {
+ relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint));
+}
+
+
+void RegistersForDebugging::print(outputStream* s) {
+ FlagSetting fs(Debugging, true);
+ int j;
+ for (j = 0; j < 8; ++j) {
+ if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
+ else { s->print( "fp = " ); os::print_location(s, i[j]); }
+ }
+ s->cr();
+
+ for (j = 0; j < 8; ++j) {
+ s->print("l%d = ", j); os::print_location(s, l[j]);
+ }
+ s->cr();
+
+ for (j = 0; j < 8; ++j) {
+ if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
+ else { s->print( "sp = " ); os::print_location(s, o[j]); }
+ }
+ s->cr();
+
+ for (j = 0; j < 8; ++j) {
+ s->print("g%d = ", j); os::print_location(s, g[j]);
+ }
+ s->cr();
+
+ // print out floats with compression
+ for (j = 0; j < 32; ) {
+ jfloat val = f[j];
+ int last = j;
+ for ( ; last+1 < 32; ++last ) {
+ char b1[1024], b2[1024];
+ sprintf(b1, "%f", val);
+ sprintf(b2, "%f", f[last+1]);
+ if (strcmp(b1, b2))
+ break;
+ }
+ s->print("f%d", j);
+ if ( j != last ) s->print(" - f%d", last);
+ s->print(" = %f", val);
+ s->fill_to(25);
+ s->print_cr(" (0x%x)", val);
+ j = last + 1;
+ }
+ s->cr();
+
+ // and doubles (evens only)
+ for (j = 0; j < 32; ) {
+ jdouble val = d[j];
+ int last = j;
+ for ( ; last+1 < 32; ++last ) {
+ char b1[1024], b2[1024];
+ sprintf(b1, "%f", val);
+ sprintf(b2, "%f", d[last+1]);
+ if (strcmp(b1, b2))
+ break;
+ }
+ s->print("d%d", 2 * j);
+ if ( j != last ) s->print(" - d%d", last);
+ s->print(" = %f", val);
+ s->fill_to(30);
+ s->print("(0x%x)", *(int*)&val);
+ s->fill_to(42);
+ s->print_cr("(0x%x)", *(1 + (int*)&val));
+ j = last + 1;
+ }
+ s->cr();
+}
+
+void RegistersForDebugging::save_registers(MacroAssembler* a) {
+ a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
+ a->flush_windows();
+ int i;
+ for (i = 0; i < 8; ++i) {
+ a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
+ a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, l_offset(i));
+ a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
+ a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
+ }
+ for (i = 0; i < 32; ++i) {
+ a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
+ }
+ for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+ a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
+ }
+}
+
+void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
+ for (int i = 1; i < 8; ++i) {
+ a->ld_ptr(r, g_offset(i), as_gRegister(i));
+ }
+ for (int j = 0; j < 32; ++j) {
+ a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
+ }
+ for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
+ a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
+ }
+}
+
+
+// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
+void MacroAssembler::push_fTOS() {
+ // %%%%%% need to implement this
+}
+
+// pops double TOS element from CPU stack and pushes on FPU stack
+void MacroAssembler::pop_fTOS() {
+ // %%%%%% need to implement this
+}
+
+void MacroAssembler::empty_FPU_stack() {
+ // %%%%%% need to implement this
+}
+
+void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
+ // plausibility check for oops
+ if (!VerifyOops) return;
+
+ if (reg == G0) return; // always NULL, which is always an oop
+
+ BLOCK_COMMENT("verify_oop {");
+ char buffer[64];
+#ifdef COMPILER1
+ if (CommentedAssembly) {
+ snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
+ block_comment(buffer);
+ }
+#endif
+
+ int len = strlen(file) + strlen(msg) + 1 + 4;
+ sprintf(buffer, "%d", line);
+ len += strlen(buffer);
+ sprintf(buffer, " at offset %d ", offset());
+ len += strlen(buffer);
+ char * real_msg = new char[len];
+ sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
+
+ // Call indirectly to solve generation ordering problem
+ AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
+
+ // Make some space on stack above the current register window.
+ // Enough to hold 8 64-bit registers.
+ add(SP,-8*8,SP);
+
+ // Save some 64-bit registers; a normal 'save' chops the heads off
+ // of 64-bit longs in the 32-bit build.
+ stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
+ stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
+ mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
+ stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
+
+ // Size of set() should stay the same
+ patchable_set((intptr_t)real_msg, O1);
+ // Load address to call to into O7
+ load_ptr_contents(a, O7);
+ // Register call to verify_oop_subroutine
+ callr(O7, G0);
+ delayed()->nop();
+ // recover frame size
+ add(SP, 8*8,SP);
+ BLOCK_COMMENT("} verify_oop");
+}
+
+void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
+ // plausibility check for oops
+ if (!VerifyOops) return;
+
+ char buffer[64];
+ sprintf(buffer, "%d", line);
+ int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
+ sprintf(buffer, " at SP+%d ", addr.disp());
+ len += strlen(buffer);
+ char * real_msg = new char[len];
+ sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
+
+ // Call indirectly to solve generation ordering problem
+ AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
+
+ // Make some space on stack above the current register window.
+ // Enough to hold 8 64-bit registers.
+ add(SP,-8*8,SP);
+
+ // Save some 64-bit registers; a normal 'save' chops the heads off
+ // of 64-bit longs in the 32-bit build.
+ stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
+ stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
+ ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
+ stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);
+
+ // Size of set() should stay the same
+ patchable_set((intptr_t)real_msg, O1);
+ // Load address to call to into O7
+ load_ptr_contents(a, O7);
+ // Register call to verify_oop_subroutine
+ callr(O7, G0);
+ delayed()->nop();
+ // recover frame size
+ add(SP, 8*8,SP);
+}
+
+// side-door communication with signalHandler in os_solaris.cpp
+address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
+
+// This macro is expanded just once; it creates shared code. Contract:
+// receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
+// registers, including flags. May not use a register 'save', as this blows
+// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
+// call.
+void MacroAssembler::verify_oop_subroutine() {
+ assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
+
+ // Leaf call; no frame.
+ Label succeed, fail, null_or_fail;
+
+ // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
+ // O0 is now the oop to be checked. O7 is the return address.
+ Register O0_obj = O0;
+
+ // Save some more registers for temps.
+ stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8);
+ stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8);
+ stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8);
+ stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8);
+
+ // Save flags
+ Register O5_save_flags = O5;
+ rdccr( O5_save_flags );
+
+ { // count number of verifies
+ Register O2_adr = O2;
+ Register O3_accum = O3;
+ inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
+ }
+
+ Register O2_mask = O2;
+ Register O3_bits = O3;
+ Register O4_temp = O4;
+
+ // mark lower end of faulting range
+ assert(_verify_oop_implicit_branch[0] == NULL, "set once");
+ _verify_oop_implicit_branch[0] = pc();
+
+ // We can't check the mark oop because it could be in the process of
+ // locking or unlocking while this is running.
+ set(Universe::verify_oop_mask (), O2_mask);
+ set(Universe::verify_oop_bits (), O3_bits);
+
+ // assert((obj & oop_mask) == oop_bits);
+ and3(O0_obj, O2_mask, O4_temp);
+ cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);
+
+ if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
+ // the null_or_fail case is useless; must test for null separately
+ br_null_short(O0_obj, pn, succeed);
+ }
+
+ // Check the Klass* of this object for being in the right area of memory.
+  // Cannot do the load in the delay slot above in case O0 is null
+ load_klass(O0_obj, O0_obj);
+  // assert(klass != NULL)
+ br_null_short(O0_obj, pn, fail);
+ // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
+
+ wrccr( O5_save_flags ); // Restore CCR's
+
+ // mark upper end of faulting range
+ _verify_oop_implicit_branch[1] = pc();
+
+ //-----------------------
+ // all tests pass
+ bind(succeed);
+
+ // Restore prior 64-bit registers
+ ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0);
+ ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1);
+ ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2);
+ ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3);
+ ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4);
+ ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5);
+
+ retl(); // Leaf return; restore prior O7 in delay slot
+ delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7);
+
+ //-----------------------
+ bind(null_or_fail); // nulls are less common but OK
+ br_null(O0_obj, false, pt, succeed);
+ delayed()->wrccr( O5_save_flags ); // Restore CCR's
+
+ //-----------------------
+ // report failure:
+ bind(fail);
+ _verify_oop_implicit_branch[2] = pc();
+
+ wrccr( O5_save_flags ); // Restore CCR's
+
+ save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+
+ // stop_subroutine expects message pointer in I1.
+ mov(I1, O1);
+
+ // Restore prior 64-bit registers
+ ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0);
+ ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1);
+ ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2);
+ ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3);
+ ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4);
+ ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5);
+
+ // factor long stop-sequence into subroutine to save space
+ assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
+
+ // call indirectly to solve generation ordering problem
+ AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
+ load_ptr_contents(al, O5);
+ jmpl(O5, 0, O7);
+ delayed()->nop();
+}
+
+
+void MacroAssembler::stop(const char* msg) {
+ // save frame first to get O7 for return address
+  // add one word to the size in case the struct is an odd number of words long;
+  // it must be doubleword-aligned for storing doubles into it.
+
+ save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+
+ // stop_subroutine expects message pointer in I1.
+ // Size of set() should stay the same
+ patchable_set((intptr_t)msg, O1);
+
+ // factor long stop-sequence into subroutine to save space
+ assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");
+
+ // call indirectly to solve generation ordering problem
+ AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
+ load_ptr_contents(a, O5);
+ jmpl(O5, 0, O7);
+ delayed()->nop();
+
+ breakpoint_trap(); // make stop actually stop rather than writing
+ // unnoticeable results in the output files.
+
+ // restore(); done in callee to save space!
+}
+
+
+void MacroAssembler::warn(const char* msg) {
+ save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
+ RegistersForDebugging::save_registers(this);
+ mov(O0, L0);
+ // Size of set() should stay the same
+ patchable_set((intptr_t)msg, O0);
+ call( CAST_FROM_FN_PTR(address, warning) );
+ delayed()->nop();
+// ret();
+// delayed()->restore();
+ RegistersForDebugging::restore_registers(this, L0);
+ restore();
+}
+
+
+void MacroAssembler::untested(const char* what) {
+ // We must be able to turn interactive prompting off
+ // in order to run automated test scripts on the VM
+ // Use the flag ShowMessageBoxOnError
+
+ char* b = new char[1024];
+ sprintf(b, "untested: %s", what);
+
+ if (ShowMessageBoxOnError) { STOP(b); }
+ else { warn(b); }
+}
+
+
+void MacroAssembler::stop_subroutine() {
+ RegistersForDebugging::save_registers(this);
+
+ // for the sake of the debugger, stick a PC on the current frame
+ // (this assumes that the caller has performed an extra "save")
+ mov(I7, L7);
+ add(O7, -7 * BytesPerInt, I7);
+
+ save_frame(); // one more save to free up another O7 register
+ mov(I0, O1); // addr of reg save area
+
+ // We expect pointer to message in I1. Caller must set it up in O1
+ mov(I1, O0); // get msg
+ call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
+ delayed()->nop();
+
+ restore();
+
+ RegistersForDebugging::restore_registers(this, O0);
+
+ save_frame(0);
+ call(CAST_FROM_FN_PTR(address,breakpoint));
+ delayed()->nop();
+ restore();
+
+ mov(L7, I7);
+ retl();
+ delayed()->restore(); // see stop above
+}
+
+
+void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
+ if ( ShowMessageBoxOnError ) {
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+ {
+      // In order to get locks to work, we need to fake an in_VM state
+ ttyLocker ttyl;
+ ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ BytecodeCounter::print();
+ }
+ if (os::message_box(msg, "Execution stopped, print registers?"))
+ regs->print(::tty);
+ }
+ BREAKPOINT;
+ ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
+ }
+ else {
+ ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
+ }
+ assert(false, err_msg("DEBUG MESSAGE: %s", msg));
+}
+
+
+void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
+ subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
+ Label no_extras;
+ br( negative, true, pt, no_extras ); // if neg, clear reg
+  delayed()->set(0, Rresult);      // annulled, so only if taken
+ bind( no_extras );
+}
+
+
+void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
+#ifdef _LP64
+ add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
+#else
+ add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
+#endif
+ bclr(1, Rresult);
+ sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
+}
+
+
+void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
+ calc_frame_size(Rextra_words, Rresult);
+ neg(Rresult);
+ save(SP, Rresult, SP);
+}
+
+
+// ---------------------------------------------------------
+Assembler::RCondition cond2rcond(Assembler::Condition c) {
+ switch (c) {
+ /*case zero: */
+ case Assembler::equal: return Assembler::rc_z;
+ case Assembler::lessEqual: return Assembler::rc_lez;
+ case Assembler::less: return Assembler::rc_lz;
+ /*case notZero:*/
+ case Assembler::notEqual: return Assembler::rc_nz;
+ case Assembler::greater: return Assembler::rc_gz;
+ case Assembler::greaterEqual: return Assembler::rc_gez;
+ }
+ ShouldNotReachHere();
+ return Assembler::rc_z;
+}
+
+// Compares a (32-bit) register with zero and branches. NOT FOR USE WITH 64-BIT POINTERS.
+void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
+ tst(s1);
+ br (c, a, p, L);
+}
+
+// Compares a pointer register with zero and branches on null.
+// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
+void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
+ assert_not_delayed();
+#ifdef _LP64
+ bpr( rc_z, a, p, s1, L );
+#else
+ tst(s1);
+ br ( zero, a, p, L );
+#endif
+}
+
+void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
+ assert_not_delayed();
+#ifdef _LP64
+ bpr( rc_nz, a, p, s1, L );
+#else
+ tst(s1);
+ br ( notZero, a, p, L );
+#endif
+}
+
+// Compare registers and branch with nop in delay slot or cbcond without delay slot.
+
+// Compare integer (32 bit) values (icc only).
+void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
+ Predict p, Label& L) {
+ assert_not_delayed();
+ if (use_cbcond(L)) {
+ Assembler::cbcond(c, icc, s1, s2, L);
+ } else {
+ cmp(s1, s2);
+ br(c, false, p, L);
+ delayed()->nop();
+ }
+}
+
+// Compare integer (32 bit) values (icc only).
+void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
+ Predict p, Label& L) {
+ assert_not_delayed();
+ if (is_simm(simm13a,5) && use_cbcond(L)) {
+ Assembler::cbcond(c, icc, s1, simm13a, L);
+ } else {
+ cmp(s1, simm13a);
+ br(c, false, p, L);
+ delayed()->nop();
+ }
+}
+
+// Branch that tests xcc in LP64 and icc in !LP64
+void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
+ Predict p, Label& L) {
+ assert_not_delayed();
+ if (use_cbcond(L)) {
+ Assembler::cbcond(c, ptr_cc, s1, s2, L);
+ } else {
+ cmp(s1, s2);
+ brx(c, false, p, L);
+ delayed()->nop();
+ }
+}
+
+// Branch that tests xcc in LP64 and icc in !LP64
+void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
+ Predict p, Label& L) {
+ assert_not_delayed();
+ if (is_simm(simm13a,5) && use_cbcond(L)) {
+ Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
+ } else {
+ cmp(s1, simm13a);
+ brx(c, false, p, L);
+ delayed()->nop();
+ }
+}
+
+// Short-branch versions for comparing a pointer with zero.
+
+void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
+ assert_not_delayed();
+ if (use_cbcond(L)) {
+ Assembler::cbcond(zero, ptr_cc, s1, 0, L);
+ return;
+ }
+ br_null(s1, false, p, L);
+ delayed()->nop();
+}
+
+void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
+ assert_not_delayed();
+ if (use_cbcond(L)) {
+ Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
+ return;
+ }
+ br_notnull(s1, false, p, L);
+ delayed()->nop();
+}
+
+// Unconditional short branch
+void MacroAssembler::ba_short(Label& L) {
+ if (use_cbcond(L)) {
+ Assembler::cbcond(equal, icc, G0, G0, L);
+ return;
+ }
+ br(always, false, pt, L);
+ delayed()->nop();
+}
+
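+// Illustrative comparison of the two forms generated by the short-branch
+// helpers above (not emitted verbatim):
+//   with cbcond   : a single compare-and-branch instruction, no delay slot
+//   without cbcond: cmp ; br<cond> ; nop     (a nop fills the delay slot)
+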
+// instruction sequences factored across compiler & interpreter
+
+
+void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
+ Register Rb_hi, Register Rb_low,
+ Register Rresult) {
+
+ Label check_low_parts, done;
+
+ cmp(Ra_hi, Rb_hi ); // compare hi parts
+ br(equal, true, pt, check_low_parts);
+ delayed()->cmp(Ra_low, Rb_low); // test low parts
+
+ // And, with an unsigned comparison, it does not matter if the numbers
+ // are negative or not.
+ // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
+ // The second one is bigger (unsignedly).
+
+ // Other notes: The first move in each triplet can be unconditional
+ // (and therefore probably prefetchable).
+ // And the equals case for the high part does not need testing,
+ // since that triplet is reached only after finding the high halves differ.
+
+ if (VM_Version::v9_instructions_work()) {
+ mov(-1, Rresult);
+ ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
+ } else {
+ br(less, true, pt, done); delayed()-> set(-1, Rresult);
+ br(greater, true, pt, done); delayed()-> set( 1, Rresult);
+ }
+
+ bind( check_low_parts );
+
+ if (VM_Version::v9_instructions_work()) {
+ mov( -1, Rresult);
+ movcc(equal, false, icc, 0, Rresult);
+ movcc(greaterUnsigned, false, icc, 1, Rresult);
+ } else {
+ set(-1, Rresult);
+ br(equal, true, pt, done); delayed()->set( 0, Rresult);
+ br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
+ }
+ bind( done );
+}
+
+void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
+ subcc( G0, Rlow, Rlow );
+ subc( G0, Rhi, Rhi );
+}
+
+void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
+ Register Rcount,
+ Register Rout_high, Register Rout_low,
+ Register Rtemp ) {
+
+
+ Register Ralt_count = Rtemp;
+ Register Rxfer_bits = Rtemp;
+
+ assert( Ralt_count != Rin_high
+ && Ralt_count != Rin_low
+ && Ralt_count != Rcount
+ && Rxfer_bits != Rin_low
+ && Rxfer_bits != Rin_high
+ && Rxfer_bits != Rcount
+ && Rxfer_bits != Rout_low
+ && Rout_low != Rin_high,
+ "register alias checks");
+
+ Label big_shift, done;
+
+ // This code can be optimized to use the 64 bit shifts in V9.
+ // Here we use the 32 bit shifts.
+
+ and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
+ subcc(Rcount, 31, Ralt_count);
+ br(greater, true, pn, big_shift);
+ delayed()->dec(Ralt_count);
+
+ // shift < 32 bits, Ralt_count = Rcount-31
+
+  // We get the transfer bits by shifting the low register right by 32-count.
+  // This is done by shifting right by 31-count and then by one more, to take
+  // care of the special (rare) case where count is zero (a single shift by 32
+  // would not work).
+
+ neg(Ralt_count);
+
+  // The order of the next two instructions is critical when Rin and Rout
+  // are the same register; do not reverse them.
+
+ srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
+ if (Rcount != Rout_low) {
+ sll(Rin_low, Rcount, Rout_low); // low half
+ }
+ sll(Rin_high, Rcount, Rout_high);
+ if (Rcount == Rout_low) {
+ sll(Rin_low, Rcount, Rout_low); // low half
+ }
+ srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
+ ba(done);
+ delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
+
+ // shift >= 32 bits, Ralt_count = Rcount-32
+ bind(big_shift);
+ sll(Rin_low, Ralt_count, Rout_high );
+ clr(Rout_low);
+
+ bind(done);
+}
+
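+// Worked example for the small-shift path above (illustrative): for count == 10,
+// Ralt_count becomes 31 - 10 = 21 after the neg(), so
+//   Rxfer_bits = (Rin_low >> 21) >> 1 == Rin_low >> (32 - count)
+//   Rout_low   = Rin_low  << 10
+//   Rout_high  = (Rin_high << 10) | Rxfer_bits
+// The two-step right shift also handles count == 0, where a single shift by 32
+// would be out of range for the 32-bit shift instructions.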
+
+void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
+ Register Rcount,
+ Register Rout_high, Register Rout_low,
+ Register Rtemp ) {
+
+ Register Ralt_count = Rtemp;
+ Register Rxfer_bits = Rtemp;
+
+ assert( Ralt_count != Rin_high
+ && Ralt_count != Rin_low
+ && Ralt_count != Rcount
+ && Rxfer_bits != Rin_low
+ && Rxfer_bits != Rin_high
+ && Rxfer_bits != Rcount
+ && Rxfer_bits != Rout_high
+ && Rout_high != Rin_low,
+ "register alias checks");
+
+ Label big_shift, done;
+
+ // This code can be optimized to use the 64 bit shifts in V9.
+ // Here we use the 32 bit shifts.
+
+ and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
+ subcc(Rcount, 31, Ralt_count);
+ br(greater, true, pn, big_shift);
+ delayed()->dec(Ralt_count);
+
+ // shift < 32 bits, Ralt_count = Rcount-31
+
+  // We get the transfer bits by shifting the high register left by 32-count.
+  // This is done by shifting left by 31-count and then by one more, to take
+  // care of the special (rare) case where count is zero (a single shift by 32
+  // would not work).
+
+ neg(Ralt_count);
+ if (Rcount != Rout_low) {
+ srl(Rin_low, Rcount, Rout_low);
+ }
+
+  // The order of the next two instructions is critical when Rin and Rout
+  // are the same register; do not reverse them.
+
+ sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
+ sra(Rin_high, Rcount, Rout_high ); // high half
+ sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
+ if (Rcount == Rout_low) {
+ srl(Rin_low, Rcount, Rout_low);
+ }
+ ba(done);
+ delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
+
+ // shift >= 32 bits, Ralt_count = Rcount-32
+ bind(big_shift);
+
+ sra(Rin_high, Ralt_count, Rout_low);
+ sra(Rin_high, 31, Rout_high); // sign into hi
+
+ bind( done );
+}
+
+
+
+void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
+ Register Rcount,
+ Register Rout_high, Register Rout_low,
+ Register Rtemp ) {
+
+ Register Ralt_count = Rtemp;
+ Register Rxfer_bits = Rtemp;
+
+ assert( Ralt_count != Rin_high
+ && Ralt_count != Rin_low
+ && Ralt_count != Rcount
+ && Rxfer_bits != Rin_low
+ && Rxfer_bits != Rin_high
+ && Rxfer_bits != Rcount
+ && Rxfer_bits != Rout_high
+ && Rout_high != Rin_low,
+ "register alias checks");
+
+ Label big_shift, done;
+
+ // This code can be optimized to use the 64 bit shifts in V9.
+ // Here we use the 32 bit shifts.
+
+ and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
+ subcc(Rcount, 31, Ralt_count);
+ br(greater, true, pn, big_shift);
+ delayed()->dec(Ralt_count);
+
+ // shift < 32 bits, Ralt_count = Rcount-31
+
+  // We get the transfer bits by shifting the high register left by 32-count.
+  // This is done by shifting left by 31-count and then by one more, to take
+  // care of the special (rare) case where count is zero (a single shift by 32
+  // would not work).
+
+ neg(Ralt_count);
+ if (Rcount != Rout_low) {
+ srl(Rin_low, Rcount, Rout_low);
+ }
+
+  // The order of the next two instructions is critical when Rin and Rout
+  // are the same register; do not reverse them.
+
+ sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
+ srl(Rin_high, Rcount, Rout_high ); // high half
+ sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
+ if (Rcount == Rout_low) {
+ srl(Rin_low, Rcount, Rout_low);
+ }
+ ba(done);
+ delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
+
+ // shift >= 32 bits, Ralt_count = Rcount-32
+ bind(big_shift);
+
+ srl(Rin_high, Ralt_count, Rout_low);
+ clr(Rout_high);
+
+ bind( done );
+}
+
+#ifdef _LP64
+void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
+ cmp(Ra, Rb);
+ mov(-1, Rresult);
+ movcc(equal, false, xcc, 0, Rresult);
+ movcc(greater, false, xcc, 1, Rresult);
+}
+#endif
+
+
+void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
+ switch (size_in_bytes) {
+ case 8: ld_long(src, dst); break;
+ case 4: ld( src, dst); break;
+ case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
+ case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
+ default: ShouldNotReachHere();
+ }
+}
+
+void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
+ switch (size_in_bytes) {
+ case 8: st_long(src, dst); break;
+ case 4: st( src, dst); break;
+ case 2: sth( src, dst); break;
+ case 1: stb( src, dst); break;
+ default: ShouldNotReachHere();
+ }
+}
+
+
+void MacroAssembler::float_cmp( bool is_float, int unordered_result,
+ FloatRegister Fa, FloatRegister Fb,
+ Register Rresult) {
+
+ fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
+
+ Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
+ Condition eq = f_equal;
+ Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;
+
+ if (VM_Version::v9_instructions_work()) {
+
+ mov(-1, Rresult);
+ movcc(eq, true, fcc0, 0, Rresult);
+ movcc(gt, true, fcc0, 1, Rresult);
+
+ } else {
+ Label done;
+
+ set( -1, Rresult );
+ //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
+ fb( eq, true, pn, done); delayed()->set( 0, Rresult );
+ fb( gt, true, pn, done); delayed()->set( 1, Rresult );
+
+ bind (done);
+ }
+}
+
+
+void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
+{
+ if (VM_Version::v9_instructions_work()) {
+ Assembler::fneg(w, s, d);
+ } else {
+ if (w == FloatRegisterImpl::S) {
+ Assembler::fneg(w, s, d);
+ } else if (w == FloatRegisterImpl::D) {
+ // number() does a sanity check on the alignment.
+ assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
+ ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
+
+ Assembler::fneg(FloatRegisterImpl::S, s, d);
+ Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
+ } else {
+ assert(w == FloatRegisterImpl::Q, "Invalid float register width");
+
+ // number() does a sanity check on the alignment.
+ assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
+ ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
+
+ Assembler::fneg(FloatRegisterImpl::S, s, d);
+ Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
+ Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
+ Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
+ }
+ }
+}
+
+void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
+{
+ if (VM_Version::v9_instructions_work()) {
+ Assembler::fmov(w, s, d);
+ } else {
+ if (w == FloatRegisterImpl::S) {
+ Assembler::fmov(w, s, d);
+ } else if (w == FloatRegisterImpl::D) {
+ // number() does a sanity check on the alignment.
+ assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
+ ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
+
+ Assembler::fmov(FloatRegisterImpl::S, s, d);
+ Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
+ } else {
+ assert(w == FloatRegisterImpl::Q, "Invalid float register width");
+
+ // number() does a sanity check on the alignment.
+ assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
+ ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
+
+ Assembler::fmov(FloatRegisterImpl::S, s, d);
+ Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
+ Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
+ Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
+ }
+ }
+}
+
+void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
+{
+ if (VM_Version::v9_instructions_work()) {
+ Assembler::fabs(w, s, d);
+ } else {
+ if (w == FloatRegisterImpl::S) {
+ Assembler::fabs(w, s, d);
+ } else if (w == FloatRegisterImpl::D) {
+ // number() does a sanity check on the alignment.
+ assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
+ ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
+
+ Assembler::fabs(FloatRegisterImpl::S, s, d);
+ Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
+ } else {
+ assert(w == FloatRegisterImpl::Q, "Invalid float register width");
+
+ // number() does a sanity check on the alignment.
+ assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
+ ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
+
+ Assembler::fabs(FloatRegisterImpl::S, s, d);
+ Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
+ Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
+ Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
+ }
+ }
+}
+
+void MacroAssembler::save_all_globals_into_locals() {
+ mov(G1,L1);
+ mov(G2,L2);
+ mov(G3,L3);
+ mov(G4,L4);
+ mov(G5,L5);
+ mov(G6,L6);
+ mov(G7,L7);
+}
+
+void MacroAssembler::restore_globals_from_locals() {
+ mov(L1,G1);
+ mov(L2,G2);
+ mov(L3,G3);
+ mov(L4,G4);
+ mov(L5,G5);
+ mov(L6,G6);
+ mov(L7,G7);
+}
+
+// Use for 64-bit operations.
+void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
+{
+ // store ptr_reg as the new top value
+#ifdef _LP64
+ casx(top_ptr_reg, top_reg, ptr_reg);
+#else
+ cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
+#endif // _LP64
+}
+
+// [RGV] This routine does not handle 64 bit operations.
+// use casx_under_lock() or casx directly!!!
+void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
+{
+ // store ptr_reg as the new top value
+ if (VM_Version::v9_instructions_work()) {
+ cas(top_ptr_reg, top_reg, ptr_reg);
+ } else {
+
+    // If the register is neither an out nor a global, it is not visible
+    // after the save. Allocate a register for it, and save its
+    // value in the register save area (the save may not flush
+    // registers to the save area).
+
+ Register top_ptr_reg_after_save;
+ Register top_reg_after_save;
+ Register ptr_reg_after_save;
+
+ if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
+ top_ptr_reg_after_save = top_ptr_reg->after_save();
+ } else {
+ Address reg_save_addr = top_ptr_reg->address_in_saved_window();
+ top_ptr_reg_after_save = L0;
+ st(top_ptr_reg, reg_save_addr);
+ }
+
+ if (top_reg->is_out() || top_reg->is_global()) {
+ top_reg_after_save = top_reg->after_save();
+ } else {
+ Address reg_save_addr = top_reg->address_in_saved_window();
+ top_reg_after_save = L1;
+ st(top_reg, reg_save_addr);
+ }
+
+ if (ptr_reg->is_out() || ptr_reg->is_global()) {
+ ptr_reg_after_save = ptr_reg->after_save();
+ } else {
+ Address reg_save_addr = ptr_reg->address_in_saved_window();
+ ptr_reg_after_save = L2;
+ st(ptr_reg, reg_save_addr);
+ }
+
+ const Register& lock_reg = L3;
+ const Register& lock_ptr_reg = L4;
+ const Register& value_reg = L5;
+ const Register& yield_reg = L6;
+ const Register& yieldall_reg = L7;
+
+ save_frame();
+
+ if (top_ptr_reg_after_save == L0) {
+ ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
+ }
+
+ if (top_reg_after_save == L1) {
+ ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
+ }
+
+ if (ptr_reg_after_save == L2) {
+ ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
+ }
+
+ Label(retry_get_lock);
+ Label(not_same);
+ Label(dont_yield);
+
+ assert(lock_addr, "lock_address should be non null for v8");
+ set((intptr_t)lock_addr, lock_ptr_reg);
+ // Initialize yield counter
+ mov(G0,yield_reg);
+ mov(G0, yieldall_reg);
+ set(StubRoutines::Sparc::locked, lock_reg);
+
+ bind(retry_get_lock);
+ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
+
+ if(use_call_vm) {
+      Untested("Need to verify global reg consistency");
+ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
+ } else {
+ // Save the regs and make space for a C call
+ save(SP, -96, SP);
+ save_all_globals_into_locals();
+ call(CAST_FROM_FN_PTR(address,os::yield_all));
+ delayed()->mov(yieldall_reg, O0);
+ restore_globals_from_locals();
+ restore();
+ }
+
+ // reset the counter
+ mov(G0,yield_reg);
+ add(yieldall_reg, 1, yieldall_reg);
+
+ bind(dont_yield);
+ // try to get lock
+ Assembler::swap(lock_ptr_reg, 0, lock_reg);
+
+ // did we get the lock?
+ cmp(lock_reg, StubRoutines::Sparc::unlocked);
+ br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
+ delayed()->add(yield_reg,1,yield_reg);
+
+ // yes, got lock. do we have the same top?
+ ld(top_ptr_reg_after_save, 0, value_reg);
+ cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
+
+ // yes, same top.
+ st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
+ membar(Assembler::StoreStore);
+
+ bind(not_same);
+ mov(value_reg, ptr_reg_after_save);
+ st(lock_reg, lock_ptr_reg, 0); // unlock
+
+ restore();
+ }
+}
+
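+// V8 fallback algorithm above, in C terms (illustrative sketch):
+//   do { old = swap(lock_addr, locked); spin, yielding occasionally; } while (old != unlocked);
+//   old_top = *top_ptr;
+//   if (old_top == top) { *top_ptr = ptr; membar(StoreStore); }
+//   ptr = old_top;                 // cas() always returns the previous memory value
+//   *lock_addr = unlocked;
+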
+RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
+ Register tmp,
+ int offset) {
+ intptr_t value = *delayed_value_addr;
+ if (value != 0)
+ return RegisterOrConstant(value + offset);
+
+ // load indirectly to solve generation ordering problem
+ AddressLiteral a(delayed_value_addr);
+ load_ptr_contents(a, tmp);
+
+#ifdef ASSERT
+ tst(tmp);
+ breakpoint_trap(zero, xcc);
+#endif
+
+ if (offset != 0)
+ add(tmp, offset, tmp);
+
+ return RegisterOrConstant(tmp);
+}
+
+
+RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+ assert(d.register_or_noreg() != G0, "lost side effect");
+ if ((s2.is_constant() && s2.as_constant() == 0) ||
+ (s2.is_register() && s2.as_register() == G0)) {
+ // Do nothing, just move value.
+ if (s1.is_register()) {
+ if (d.is_constant()) d = temp;
+ mov(s1.as_register(), d.as_register());
+ return d;
+ } else {
+ return s1;
+ }
+ }
+
+ if (s1.is_register()) {
+ assert_different_registers(s1.as_register(), temp);
+ if (d.is_constant()) d = temp;
+ andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+ return d;
+ } else {
+ if (s2.is_register()) {
+ assert_different_registers(s2.as_register(), temp);
+ if (d.is_constant()) d = temp;
+ set(s1.as_constant(), temp);
+ andn(temp, s2.as_register(), d.as_register());
+ return d;
+ } else {
+ intptr_t res = s1.as_constant() & ~s2.as_constant();
+ return res;
+ }
+ }
+}
+
+RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+ assert(d.register_or_noreg() != G0, "lost side effect");
+ if ((s2.is_constant() && s2.as_constant() == 0) ||
+ (s2.is_register() && s2.as_register() == G0)) {
+ // Do nothing, just move value.
+ if (s1.is_register()) {
+ if (d.is_constant()) d = temp;
+ mov(s1.as_register(), d.as_register());
+ return d;
+ } else {
+ return s1;
+ }
+ }
+
+ if (s1.is_register()) {
+ assert_different_registers(s1.as_register(), temp);
+ if (d.is_constant()) d = temp;
+ add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+ return d;
+ } else {
+ if (s2.is_register()) {
+ assert_different_registers(s2.as_register(), temp);
+ if (d.is_constant()) d = temp;
+ add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
+ return d;
+ } else {
+ intptr_t res = s1.as_constant() + s2.as_constant();
+ return res;
+ }
+ }
+}
+
+RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+ assert(d.register_or_noreg() != G0, "lost side effect");
+ if (!is_simm13(s2.constant_or_zero()))
+ s2 = (s2.as_constant() & 0xFF);
+ if ((s2.is_constant() && s2.as_constant() == 0) ||
+ (s2.is_register() && s2.as_register() == G0)) {
+ // Do nothing, just move value.
+ if (s1.is_register()) {
+ if (d.is_constant()) d = temp;
+ mov(s1.as_register(), d.as_register());
+ return d;
+ } else {
+ return s1;
+ }
+ }
+
+ if (s1.is_register()) {
+ assert_different_registers(s1.as_register(), temp);
+ if (d.is_constant()) d = temp;
+ sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+ return d;
+ } else {
+ if (s2.is_register()) {
+ assert_different_registers(s2.as_register(), temp);
+ if (d.is_constant()) d = temp;
+ set(s1.as_constant(), temp);
+ sll_ptr(temp, s2.as_register(), d.as_register());
+ return d;
+ } else {
+ intptr_t res = s1.as_constant() << s2.as_constant();
+ return res;
+ }
+ }
+}
+
+
+// Look up the method for a megamorphic invokeinterface call.
+// The target method is determined by <intf_klass, itable_index>.
+// The receiver klass is in recv_klass.
+// On success, the result will be in method_result, and execution falls through.
+// On failure, execution transfers to the given label.
+void MacroAssembler::lookup_interface_method(Register recv_klass,
+ Register intf_klass,
+ RegisterOrConstant itable_index,
+ Register method_result,
+ Register scan_temp,
+ Register sethi_temp,
+ Label& L_no_such_interface) {
+ assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
+ assert(itable_index.is_constant() || itable_index.as_register() == method_result,
+ "caller must use same register for non-constant itable index as for method");
+
+ Label L_no_such_interface_restore;
+ bool did_save = false;
+ if (scan_temp == noreg || sethi_temp == noreg) {
+ Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
+ Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
+ assert(method_result->is_global(), "must be able to return value");
+ scan_temp = L2;
+ sethi_temp = L3;
+ save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
+ recv_klass = recv_2;
+ intf_klass = intf_2;
+ did_save = true;
+ }
+
+ // Compute start of first itableOffsetEntry (which is at the end of the vtable)
+ int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+ int scan_step = itableOffsetEntry::size() * wordSize;
+ int vte_size = vtableEntry::size() * wordSize;
+
+ lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp);
+ // %%% We should store the aligned, prescaled offset in the klassoop.
+ // Then the next several instructions would fold away.
+
+ int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
+ int itb_offset = vtable_base;
+ if (round_to_unit != 0) {
+ // hoist first instruction of round_to(scan_temp, BytesPerLong):
+ itb_offset += round_to_unit - wordSize;
+ }
+ int itb_scale = exact_log2(vtableEntry::size() * wordSize);
+ sll(scan_temp, itb_scale, scan_temp);
+ add(scan_temp, itb_offset, scan_temp);
+ if (round_to_unit != 0) {
+ // Round up to align_object_offset boundary
+ // see code for InstanceKlass::start_of_itable!
+ // Was: round_to(scan_temp, BytesPerLong);
+ // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
+ and3(scan_temp, -round_to_unit, scan_temp);
+ }
+ add(recv_klass, scan_temp, scan_temp);
+
+ // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+ RegisterOrConstant itable_offset = itable_index;
+ itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
+ itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
+ add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
+
+ // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
+ // if (scan->interface() == intf) {
+ // result = (klass + scan->offset() + itable_index);
+ // }
+ // }
+ Label L_search, L_found_method;
+
+ for (int peel = 1; peel >= 0; peel--) {
+ // %%%% Could load both offset and interface in one ldx, if they were
+ // in the opposite order. This would save a load.
+ ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
+
+ // Check that this entry is non-null. A null entry means that
+ // the receiver class doesn't implement the interface, and wasn't the
+ // same as when the caller was compiled.
+ bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
+ delayed()->cmp(method_result, intf_klass);
+
+ if (peel) {
+ brx(Assembler::equal, false, Assembler::pt, L_found_method);
+ } else {
+ brx(Assembler::notEqual, false, Assembler::pn, L_search);
+ // (invert the test to fall through to found_method...)
+ }
+ delayed()->add(scan_temp, scan_step, scan_temp);
+
+ if (!peel) break;
+
+ bind(L_search);
+ }
+
+ bind(L_found_method);
+
+ // Got a hit.
+ int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
+ // scan_temp[-scan_step] points to the vtable offset we need
+ ito_offset -= scan_step;
+ lduw(scan_temp, ito_offset, scan_temp);
+ ld_ptr(recv_klass, scan_temp, method_result);
+
+ if (did_save) {
+ Label L_done;
+ ba(L_done);
+ delayed()->restore();
+
+ bind(L_no_such_interface_restore);
+ ba(L_no_such_interface);
+ delayed()->restore();
+
+ bind(L_done);
+ }
+}
+
+
+// virtual method calling
+void MacroAssembler::lookup_virtual_method(Register recv_klass,
+ RegisterOrConstant vtable_index,
+ Register method_result) {
+ assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
+ Register sethi_temp = method_result;
+ const int base = (InstanceKlass::vtable_start_offset() * wordSize +
+ // method pointer offset within the vtable entry:
+ vtableEntry::method_offset_in_bytes());
+ RegisterOrConstant vtable_offset = vtable_index;
+ // Each of the following three lines potentially generates an instruction.
+ // But the total number of address formation instructions will always be
+ // at most two, and will often be zero. In any case, it will be optimal.
+ // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
+ // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
+ vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
+ vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
+ Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
+ ld_ptr(vtable_entry_addr, method_result);
+}
+
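+// Illustrative sketch of the address computed above (comment only):
+//   method_result = *(Method**)(recv_klass
+//                     + InstanceKlass::vtable_start_offset() * wordSize
+//                     + vtable_index * vtableEntry::size() * wordSize
+//                     + vtableEntry::method_offset_in_bytes());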
+
+void MacroAssembler::check_klass_subtype(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label& L_success) {
+ Register sub_2 = sub_klass;
+ Register sup_2 = super_klass;
+ if (!sub_2->is_global()) sub_2 = L0;
+ if (!sup_2->is_global()) sup_2 = L1;
+ bool did_save = false;
+ if (temp_reg == noreg || temp2_reg == noreg) {
+ temp_reg = L2;
+ temp2_reg = L3;
+ save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
+ sub_klass = sub_2;
+ super_klass = sup_2;
+ did_save = true;
+ }
+ Label L_failure, L_pop_to_failure, L_pop_to_success;
+ check_klass_subtype_fast_path(sub_klass, super_klass,
+ temp_reg, temp2_reg,
+ (did_save ? &L_pop_to_success : &L_success),
+ (did_save ? &L_pop_to_failure : &L_failure), NULL);
+
+ if (!did_save)
+ save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
+ check_klass_subtype_slow_path(sub_2, sup_2,
+ L2, L3, L4, L5,
+ NULL, &L_pop_to_failure);
+
+ // on success:
+ bind(L_pop_to_success);
+ restore();
+ ba_short(L_success);
+
+ // on failure:
+ bind(L_pop_to_failure);
+ restore();
+ bind(L_failure);
+}
+
+
+void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label* L_success,
+ Label* L_failure,
+ Label* L_slow_path,
+ RegisterOrConstant super_check_offset) {
+ int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
+ int sco_offset = in_bytes(Klass::super_check_offset_offset());
+
+ bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
+ bool need_slow_path = (must_load_sco ||
+ super_check_offset.constant_or_zero() == sco_offset);
+
+ assert_different_registers(sub_klass, super_klass, temp_reg);
+ if (super_check_offset.is_register()) {
+ assert_different_registers(sub_klass, super_klass, temp_reg,
+ super_check_offset.as_register());
+ } else if (must_load_sco) {
+ assert(temp2_reg != noreg, "supply either a temp or a register offset");
+ }
+
+ Label L_fallthrough;
+ int label_nulls = 0;
+ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
+ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
+ if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
+ assert(label_nulls <= 1 ||
+ (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
+ "at most one NULL in the batch, usually");
+
+ // If the pointers are equal, we are done (e.g., String[] elements).
+ // This self-check enables sharing of secondary supertype arrays among
+ // non-primary types such as array-of-interface. Otherwise, each such
+ // type would need its own customized SSA.
+ // We move this check to the front of the fast path because many
+ // type checks are in fact trivially successful in this manner,
+ // so we get a nicely predicted branch right at the start of the check.
+ cmp(super_klass, sub_klass);
+ brx(Assembler::equal, false, Assembler::pn, *L_success);
+ delayed()->nop();
+
+ // Check the supertype display:
+ if (must_load_sco) {
+ // The super check offset is always positive...
+ lduw(super_klass, sco_offset, temp2_reg);
+ super_check_offset = RegisterOrConstant(temp2_reg);
+ // super_check_offset is register.
+ assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
+ }
+ ld_ptr(sub_klass, super_check_offset, temp_reg);
+ cmp(super_klass, temp_reg);
+
+ // This check has worked decisively for primary supers.
+ // Secondary supers are sought in the super_cache ('super_cache_addr').
+ // (Secondary supers are interfaces and very deeply nested subtypes.)
+ // This works in the same check above because of a tricky aliasing
+ // between the super_cache and the primary super display elements.
+ // (The 'super_check_addr' can address either, as the case requires.)
+ // Note that the cache is updated below if it does not help us find
+ // what we need immediately.
+ // So if it was a primary super, we can just fail immediately.
+ // Otherwise, it's the slow path for us (no success at this point).
+
+ // Hacked ba(), which may only be used just before L_fallthrough.
+#define FINAL_JUMP(label) \
+ if (&(label) != &L_fallthrough) { \
+ ba(label); delayed()->nop(); \
+ }
+
+ if (super_check_offset.is_register()) {
+ brx(Assembler::equal, false, Assembler::pn, *L_success);
+ delayed()->cmp(super_check_offset.as_register(), sc_offset);
+
+ if (L_failure == &L_fallthrough) {
+ brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
+ delayed()->nop();
+ } else {
+ brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
+ delayed()->nop();
+ FINAL_JUMP(*L_slow_path);
+ }
+ } else if (super_check_offset.as_constant() == sc_offset) {
+ // Need a slow path; fast failure is impossible.
+ if (L_slow_path == &L_fallthrough) {
+ brx(Assembler::equal, false, Assembler::pt, *L_success);
+ delayed()->nop();
+ } else {
+ brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
+ delayed()->nop();
+ FINAL_JUMP(*L_success);
+ }
+ } else {
+ // No slow path; it's a fast decision.
+ if (L_failure == &L_fallthrough) {
+ brx(Assembler::equal, false, Assembler::pt, *L_success);
+ delayed()->nop();
+ } else {
+ brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
+ delayed()->nop();
+ FINAL_JUMP(*L_success);
+ }
+ }
+
+ bind(L_fallthrough);
+
+#undef FINAL_JUMP
+}
+
+
+void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
+ Register super_klass,
+ Register count_temp,
+ Register scan_temp,
+ Register scratch_reg,
+ Register coop_reg,
+ Label* L_success,
+ Label* L_failure) {
+ assert_different_registers(sub_klass, super_klass,
+ count_temp, scan_temp, scratch_reg, coop_reg);
+
+ Label L_fallthrough, L_loop;
+ int label_nulls = 0;
+ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
+ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
+ assert(label_nulls <= 1, "at most one NULL in the batch");
+
+ // a couple of useful fields in sub_klass:
+ int ss_offset = in_bytes(Klass::secondary_supers_offset());
+ int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
+
+ // Do a linear scan of the secondary super-klass chain.
+ // This code is rarely used, so simplicity is a virtue here.
+
+#ifndef PRODUCT
+ int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
+ inc_counter((address) pst_counter, count_temp, scan_temp);
+#endif
+
+ // We will consult the secondary-super array.
+ ld_ptr(sub_klass, ss_offset, scan_temp);
+
+ Register search_key = super_klass;
+
+ // Load the array length. (The unsigned lduw does the right thing on LP64.)
+ lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);
+
+ // Check for empty secondary super list
+ tst(count_temp);
+
+ // In the array of super classes, elements are pointer sized.
+ int element_size = wordSize;
+
+ // Top of search loop
+ bind(L_loop);
+ br(Assembler::equal, false, Assembler::pn, *L_failure);
+ delayed()->add(scan_temp, element_size, scan_temp);
+
+ // Skip the array header in all array accesses.
+ int elem_offset = Array<Klass*>::base_offset_in_bytes();
+ elem_offset -= element_size; // the scan pointer was pre-incremented also
+
+ // Load next super to check
+ ld_ptr( scan_temp, elem_offset, scratch_reg );
+
+ // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
+ cmp(scratch_reg, search_key);
+
+ // A miss means we are NOT a subtype and need to keep looping
+ brx(Assembler::notEqual, false, Assembler::pn, L_loop);
+ delayed()->deccc(count_temp); // decrement trip counter in delay slot
+
+ // Success. Cache the super we found and proceed in triumph.
+ st_ptr(super_klass, sub_klass, sc_offset);
+
+ if (L_success != &L_fallthrough) {
+ ba(*L_success);
+ delayed()->nop();
+ }
+
+ bind(L_fallthrough);
+}
+
+
+RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
+ Register temp_reg,
+ int extra_slot_offset) {
+ // cf. TemplateTable::prepare_invoke(), if (load_receiver).
+ int stackElementSize = Interpreter::stackElementSize;
+ int offset = extra_slot_offset * stackElementSize;
+ if (arg_slot.is_constant()) {
+ offset += arg_slot.as_constant() * stackElementSize;
+ return offset;
+ } else {
+ assert(temp_reg != noreg, "must specify");
+ sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
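+ // Scale the slot index by the interpreter stack element size; since that
+ // size is a power of two, the scaling is a simple shift.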
+ if (offset != 0)
+ add(temp_reg, offset, temp_reg);
+ return temp_reg;
+ }
+}
+
+
+Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+ Register temp_reg,
+ int extra_slot_offset) {
+ return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
+}
+
+
+void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
+ Register temp_reg,
+ Label& done, Label* slow_case,
+ BiasedLockingCounters* counters) {
+ assert(UseBiasedLocking, "why call this otherwise?");
+
+ if (PrintBiasedLockingStatistics) {
+ assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
+ if (counters == NULL)
+ counters = BiasedLocking::counters();
+ }
+
+ Label cas_label;
+
+ // Biased locking
+ // See whether the lock is currently biased toward our thread and
+ // whether the epoch is still valid
+ // Note that the runtime guarantees sufficient alignment of JavaThread
+ // pointers to allow age to be placed into low bits
+ assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
+ cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
+
+ load_klass(obj_reg, temp_reg);
+ ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
+ or3(G2_thread, temp_reg, temp_reg);
+ xor3(mark_reg, temp_reg, temp_reg);
+ andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
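+ // After the xor, only bits that differ from (prototype header | thread)
+ // remain; masking off the age bits, a zero result means the object is
+ // already biased toward this thread in the current epoch.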
+ if (counters != NULL) {
+ cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
+ // Reload mark_reg as we may need it later
+ ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
+ }
+ brx(Assembler::equal, true, Assembler::pt, done);
+ delayed()->nop();
+
+ Label try_revoke_bias;
+ Label try_rebias;
+ Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
+ assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+
+ // At this point we know that the header has the bias pattern and
+ // that we are not the bias owner in the current epoch. We need to
+ // figure out more details about the state of the header in order to
+ // know what operations can be legally performed on the object's
+ // header.
+
+ // If the low three bits in the xor result aren't clear, that means
+ // the prototype header is no longer biased and we have to revoke
+ // the bias on this object.
+ btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
+ brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
+
+ // Biasing is still enabled for this data type. See whether the
+ // epoch of the current bias is still valid, meaning that the epoch
+ // bits of the mark word are equal to the epoch bits of the
+ // prototype header. (Note that the prototype header's epoch bits
+ // only change at a safepoint.) If not, attempt to rebias the object
+ // toward the current thread. Note that we must be absolutely sure
+ // that the current epoch is invalid in order to do this because
+ // otherwise the manipulations it performs on the mark word are
+ // illegal.
+ delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
+ brx(Assembler::notZero, false, Assembler::pn, try_rebias);
+
+ // The epoch of the current bias is still valid but we know nothing
+ // about the owner; it might be set or it might be clear. Try to
+ // acquire the bias of the object using an atomic operation. If this
+ // fails we will go in to the runtime to revoke the object's bias.
+ // Note that we first construct the presumed unbiased header so we
+ // don't accidentally blow away another thread's valid bias.
+ delayed()->and3(mark_reg,
+ markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
+ mark_reg);
+ or3(G2_thread, mark_reg, temp_reg);
+ casn(mark_addr.base(), mark_reg, temp_reg);
+ // If the biasing toward our thread failed, this means that
+ // another thread succeeded in biasing it toward itself and we
+ // need to revoke that bias. The revocation will occur in the
+ // interpreter runtime in the slow case.
+ cmp(mark_reg, temp_reg);
+ if (counters != NULL) {
+ cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
+ }
+ if (slow_case != NULL) {
+ brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
+ delayed()->nop();
+ }
+ ba_short(done);
+
+ bind(try_rebias);
+ // At this point we know the epoch has expired, meaning that the
+ // current "bias owner", if any, is actually invalid. Under these
+ // circumstances _only_, we are allowed to use the current header's
+ // value as the comparison value when doing the cas to acquire the
+ // bias in the current epoch. In other words, we allow transfer of
+ // the bias from one thread to another directly in this situation.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ load_klass(obj_reg, temp_reg);
+ ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
+ or3(G2_thread, temp_reg, temp_reg);
+ casn(mark_addr.base(), mark_reg, temp_reg);
+ // If the biasing toward our thread failed, this means that
+ // another thread succeeded in biasing it toward itself and we
+ // need to revoke that bias. The revocation will occur in the
+ // interpreter runtime in the slow case.
+ cmp(mark_reg, temp_reg);
+ if (counters != NULL) {
+ cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
+ }
+ if (slow_case != NULL) {
+ brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
+ delayed()->nop();
+ }
+ ba_short(done);
+
+ bind(try_revoke_bias);
+ // The prototype mark in the klass doesn't have the bias bit set any
+ // more, indicating that objects of this data type are not supposed
+ // to be biased any more. We are going to try to reset the mark of
+ // this object to the prototype value and fall through to the
+ // CAS-based locking scheme. Note that if our CAS fails, it means
+ // that another thread raced us for the privilege of revoking the
+ // bias of this particular object, so it's okay to continue in the
+ // normal locking code.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ load_klass(obj_reg, temp_reg);
+ ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
+ casn(mark_addr.base(), mark_reg, temp_reg);
+ // Fall through to the normal CAS-based lock, because no matter what
+ // the result of the above CAS, some thread must have succeeded in
+ // removing the bias bit from the object's header.
+ if (counters != NULL) {
+ cmp(mark_reg, temp_reg);
+ cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
+ }
+
+ bind(cas_label);
+}
+
+void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
+ bool allow_delay_slot_filling) {
+ // Check for biased locking unlock case, which is a no-op
+ // Note: we do not have to check the thread ID for two reasons.
+ // First, the interpreter checks for IllegalMonitorStateException at
+ // a higher level. Second, if the bias was revoked while we held the
+ // lock, the object could not be rebiased toward another thread, so
+ // the bias bit would be clear.
+ ld_ptr(mark_addr, temp_reg);
+ and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
+ cmp(temp_reg, markOopDesc::biased_lock_pattern);
+ brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
+ delayed();
+ if (!allow_delay_slot_filling) {
+ nop();
+ }
+}
+
+
+// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
+// Solaris/SPARC's "as". Another apt name would be cas_ptr()
+
+void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
+ casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+}
+
+
+
+// compiler_lock_object() and compiler_unlock_object() are direct transliterations
+// of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
+// The code could be tightened up considerably.
+//
+// box->dhw disposition - post-conditions at DONE_LABEL.
+// - Successful inflated lock: box->dhw != 0.
+// Any non-zero value suffices.
+// Consider G2_thread, rsp, boxReg, or unused_mark()
+// - Successful Stack-lock: box->dhw == mark.
+// box->dhw must contain the displaced mark word value
+// - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
+// The slow-path fast_enter() and slow_enter() operators
+// are responsible for setting box->dhw = NonZero (typically ::unused_mark).
+// - Biased: box->dhw is undefined
+//
+// SPARC refworkload performance - specifically jetstream and scimark - is
+// extremely sensitive to the size of the code emitted by compiler_lock_object
+// and compiler_unlock_object. Critically, the key factor is code size, not path
+// length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
+// effect).
+
+
+void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
+ Register Rbox, Register Rscratch,
+ BiasedLockingCounters* counters,
+ bool try_bias) {
+ Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
+
+ verify_oop(Roop);
+ Label done ;
+
+ if (counters != NULL) {
+ inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
+ }
+
+ if (EmitSync & 1) {
+ mov(3, Rscratch);
+ st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ cmp(SP, G0);
+ return ;
+ }
+
+ if (EmitSync & 2) {
+
+ // Fetch object's markword
+ ld_ptr(mark_addr, Rmark);
+
+ if (try_bias) {
+ biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
+ }
+
+ // Save Rbox in Rscratch to be used for the cas operation
+ mov(Rbox, Rscratch);
+
+ // set Rmark to markOop | markOopDesc::unlocked_value
+ or3(Rmark, markOopDesc::unlocked_value, Rmark);
+
+ // Initialize the box. (Must happen before we update the object mark!)
+ st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
+
+ // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
+ assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+ casx_under_lock(mark_addr.base(), Rmark, Rscratch,
+ (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+
+ // if compare/exchange succeeded we found an unlocked object and we now have locked it
+ // hence we are done
+ cmp(Rmark, Rscratch);
+#ifdef _LP64
+ sub(Rscratch, STACK_BIAS, Rscratch);
+#endif
+ brx(Assembler::equal, false, Assembler::pt, done);
+ delayed()->sub(Rscratch, SP, Rscratch); // pull next instruction into delay slot
+
+ // we did not find an unlocked object so see if this is a recursive case
+ // sub(Rscratch, SP, Rscratch);
+ assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
+ andcc(Rscratch, 0xfffff003, Rscratch);
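+ // A zero result (mark - SP within the current page, low lock bits clear)
+ // identifies a recursive stack-lock by this thread; that zero is stored
+ // below as the displaced header.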
+ st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ bind (done);
+ return ;
+ }
+
+ Label Egress ;
+
+ if (EmitSync & 256) {
+ Label IsInflated ;
+
+ ld_ptr(mark_addr, Rmark); // fetch obj->mark
+ // Triage: biased, stack-locked, neutral, inflated
+ if (try_bias) {
+ biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
+ // Invariant: if control reaches this point in the emitted stream
+ // then Rmark has not been modified.
+ }
+
+ // Store mark into displaced mark field in the on-stack basic-lock "box"
+ // Critically, this must happen before the CAS
+ // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
+ st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ andcc(Rmark, 2, G0);
+ brx(Assembler::notZero, false, Assembler::pn, IsInflated);
+ delayed()->
+
+ // Try stack-lock acquisition.
+ // Beware: the 1st instruction is in a delay slot
+ mov(Rbox, Rscratch);
+ or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+ casn(mark_addr.base(), Rmark, Rscratch);
+ cmp(Rmark, Rscratch);
+ brx(Assembler::equal, false, Assembler::pt, done);
+ delayed()->sub(Rscratch, SP, Rscratch);
+
+ // Stack-lock attempt failed - check for recursive stack-lock.
+ // See the comments below about how we might remove this case.
+#ifdef _LP64
+ sub(Rscratch, STACK_BIAS, Rscratch);
+#endif
+ assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
+ andcc(Rscratch, 0xfffff003, Rscratch);
+ br(Assembler::always, false, Assembler::pt, done);
+ delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+
+ bind(IsInflated);
+ if (EmitSync & 64) {
+ // If m->owner != null goto IsLocked
+ // Pessimistic form: Test-and-CAS vs CAS
+ // The optimistic form avoids RTS->RTO cache line upgrades.
+ ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+ andcc(Rscratch, Rscratch, G0);
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->nop();
+ // m->owner == null : it's unlocked.
+ }
+
+ // Try to CAS m->owner from null to Self
+ // Invariant: if we acquire the lock then _recursions should be 0.
+ add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+ mov(G2_thread, Rscratch);
+ casn(Rmark, G0, Rscratch);
+ cmp(Rscratch, G0);
+ // Intentional fall-through into done
+ } else {
+ // Aggressively avoid the Store-before-CAS penalty
+ // Defer the store into box->dhw until after the CAS
+ Label IsInflated, Recursive ;
+
+// Anticipate CAS -- Avoid RTS->RTO upgrade
+// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
+
+ ld_ptr(mark_addr, Rmark); // fetch obj->mark
+ // Triage: biased, stack-locked, neutral, inflated
+
+ if (try_bias) {
+ biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
+ // Invariant: if control reaches this point in the emitted stream
+ // then Rmark has not been modified.
+ }
+ andcc(Rmark, 2, G0);
+ brx(Assembler::notZero, false, Assembler::pn, IsInflated);
+ delayed()-> // Beware - dangling delay-slot
+
+ // Try stack-lock acquisition.
+ // Transiently install BUSY (0) encoding in the mark word.
+ // if the CAS of 0 into the mark was successful then we execute:
+ // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
+ // ST obj->mark = box -- overwrite transient 0 value
+ // This presumes TSO, of course.
+
+ mov(0, Rscratch);
+ or3(Rmark, markOopDesc::unlocked_value, Rmark);
+ assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+ casn(mark_addr.base(), Rmark, Rscratch);
+// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
+ cmp(Rscratch, Rmark);
+ brx(Assembler::notZero, false, Assembler::pn, Recursive);
+ delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ if (counters != NULL) {
+ cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
+ }
+ ba(done);
+ delayed()->st_ptr(Rbox, mark_addr);
+
+ bind(Recursive);
+ // Stack-lock attempt failed - check for recursive stack-lock.
+ // Tests show that we can remove the recursive case with no impact
+ // on refworkload 0.83. If we need to reduce the size of the code
+ // emitted by compiler_lock_object(), the recursive case is a perfect
+ // candidate.
+ //
+ // A more extreme idea is to always inflate on stack-lock recursion.
+ // This lets us eliminate the recursive checks in compiler_lock_object
+ // and compiler_unlock_object and the (box->dhw == 0) encoding.
+ // A brief experiment - requiring changes to synchronizer.cpp and the
+ // interpreter - showed a performance *increase*. In the same experiment I eliminated
+ // the fast-path stack-lock code from the interpreter and always passed
+ // control to the "slow" operators in synchronizer.cpp.
+
+ // Rscratch contains the fetched obj->mark value from the failed CASN.
+#ifdef _LP64
+ sub(Rscratch, STACK_BIAS, Rscratch);
+#endif
+ sub(Rscratch, SP, Rscratch);
+ assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
+ andcc(Rscratch, 0xfffff003, Rscratch);
+ if (counters != NULL) {
+ // Accounting needs the Rscratch register
+ st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
+ ba_short(done);
+ } else {
+ ba(done);
+ delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ }
+
+ bind (IsInflated);
+ if (EmitSync & 64) {
+ // If m->owner != null goto IsLocked
+ // Test-and-CAS vs CAS
+ // Pessimistic form avoids futile (doomed) CAS attempts
+ // The optimistic form avoids RTS->RTO cache line upgrades.
+ ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+ andcc(Rscratch, Rscratch, G0);
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->nop();
+ // m->owner == null : it's unlocked.
+ }
+
+ // Try to CAS m->owner from null to Self
+ // Invariant: if we acquire the lock then _recursions should be 0.
+ add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+ mov(G2_thread, Rscratch);
+ casn(Rmark, G0, Rscratch);
+ cmp(Rscratch, G0);
+ // ST box->displaced_header = NonZero.
+ // Any non-zero value suffices:
+ // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+ st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
+ // Intentional fall-through into done
+ }
+
+ bind (done);
+}
+
+void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
+ Register Rbox, Register Rscratch,
+ bool try_bias) {
+ Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
+
+ Label done ;
+
+ if (EmitSync & 4) {
+ cmp(SP, G0);
+ return ;
+ }
+
+ if (EmitSync & 8) {
+ if (try_bias) {
+ biased_locking_exit(mark_addr, Rscratch, done);
+ }
+
+ // Test first if it is a fast recursive unlock
+ ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
+ br_null_short(Rmark, Assembler::pt, done);
+
+ // Check if it is still a lightweight lock; this is true if we see
+ // the stack address of the basicLock in the markOop of the object
+ assert(mark_addr.disp() == 0, "cas must take a zero displacement");
+ casx_under_lock(mark_addr.base(), Rbox, Rmark,
+ (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+ ba(done);
+ delayed()->cmp(Rbox, Rmark);
+ bind(done);
+ return ;
+ }
+
+ // Beware ... If the aggregate size of the code emitted by CLO and CUO
+ // is too large, performance rolls abruptly off a cliff.
+ // This could be related to inlining policies, code cache management, or
+ // I$ effects.
+ Label LStacked ;
+
+ if (try_bias) {
+ // TODO: eliminate redundant LDs of obj->mark
+ biased_locking_exit(mark_addr, Rscratch, done);
+ }
+
+ ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
+ ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
+ andcc(Rscratch, Rscratch, G0);
+ brx(Assembler::zero, false, Assembler::pn, done);
+ delayed()->nop(); // consider: relocate fetch of mark, above, into this DS
+ andcc(Rmark, 2, G0);
+ brx(Assembler::zero, false, Assembler::pt, LStacked);
+ delayed()->nop();
+
+ // It's inflated
+ // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
+ // the ST of 0 into _owner which releases the lock. This prevents loads
+ // and stores within the critical section from reordering (floating)
+ // past the store that releases the lock. But TSO is a strong memory model
+ // and that particular flavor of barrier is a noop, so we can safely elide it.
+ // Note that we use 1-0 locking by default for the inflated case. We
+ // close the resultant (and rare) race by having contended threads in
+ // monitorenter periodically poll _owner.
+ ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+ ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
+ xor3(Rscratch, G2_thread, Rscratch);
+ orcc(Rbox, Rscratch, Rbox);
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->
+ ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
+ ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
+ orcc(Rbox, Rscratch, G0);
+ if (EmitSync & 65536) {
+ Label LSucc ;
+ brx(Assembler::notZero, false, Assembler::pn, LSucc);
+ delayed()->nop();
+ ba(done);
+ delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+
+ bind(LSucc);
+ st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+ if (os::is_MP()) { membar (StoreLoad); }
+ ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
+ andcc(Rscratch, Rscratch, G0);
+ brx(Assembler::notZero, false, Assembler::pt, done);
+ delayed()->andcc(G0, G0, G0);
+ add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+ mov(G2_thread, Rscratch);
+ casn(Rmark, G0, Rscratch);
+ // invert icc.zf and goto done
+ br_notnull(Rscratch, false, Assembler::pt, done);
+ delayed()->cmp(G0, G0);
+ ba(done);
+ delayed()->cmp(G0, 1);
+ } else {
+ brx(Assembler::notZero, false, Assembler::pn, done);
+ delayed()->nop();
+ ba(done);
+ delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+ }
+
+ bind (LStacked);
+ // Consider: we could replace the expensive CAS in the exit
+ // path with a simple ST of the displaced mark value fetched from
+ // the on-stack basiclock box. That admits a race where a thread T2
+ // in the slow lock path -- inflating with monitor M -- could race a
+ // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
+ // More precisely T1 in the stack-lock unlock path could "stomp" the
+ // inflated mark value M installed by T2, resulting in an orphan
+ // object monitor M and T2 becoming stranded. We can remedy that situation
+ // by having T2 periodically poll the object's mark word using timed wait
+ // operations. If T2 discovers that a stomp has occurred it vacates
+ // the monitor M and wakes any other threads stranded on the now-orphan M.
+ // In addition the monitor scavenger, which performs deflation,
+ // would also need to check for orphan monitors and stranded threads.
+ //
+ // Finally, inflation is also used when T2 needs to assign a hashCode
+ // to O and O is stack-locked by T1. The "stomp" race could cause
+ // an assigned hashCode value to be lost. We can avoid that condition
+ // and provide the necessary hashCode stability invariants by ensuring
+ // that hashCode generation is idempotent between copying GCs.
+ // For example we could compute the hashCode of an object O as
+ // O's heap address XOR some high quality RNG value that is refreshed
+ // at GC-time. The monitor scavenger would install the hashCode
+ // found in any orphan monitors. Again, the mechanism admits a
+ // lost-update "stomp" WAW race but detects and recovers as needed.
+ //
+ // A prototype implementation showed excellent results, although
+ // the scavenger and timeout code was rather involved.
+
+ casn(mark_addr.base(), Rbox, Rscratch);
+ cmp(Rbox, Rscratch);
+ // Intentional fall through into done ...
+
+ bind(done);
+}
+
+
+
+void MacroAssembler::print_CPU_state() {
+ // %%%%% need to implement this
+}
+
+void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
+ // %%%%% need to implement this
+}
+
+void MacroAssembler::push_IU_state() {
+ // %%%%% need to implement this
+}
+
+
+void MacroAssembler::pop_IU_state() {
+ // %%%%% need to implement this
+}
+
+
+void MacroAssembler::push_FPU_state() {
+ // %%%%% need to implement this
+}
+
+
+void MacroAssembler::pop_FPU_state() {
+ // %%%%% need to implement this
+}
+
+
+void MacroAssembler::push_CPU_state() {
+ // %%%%% need to implement this
+}
+
+
+void MacroAssembler::pop_CPU_state() {
+ // %%%%% need to implement this
+}
+
+
+
+void MacroAssembler::verify_tlab() {
+#ifdef ASSERT
+ if (UseTLAB && VerifyOops) {
+ Label next, next2, ok;
+ Register t1 = L0;
+ Register t2 = L1;
+ Register t3 = L2;
+
+ save_frame(0);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
+ or3(t1, t2, t3);
+ cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
+ STOP("assert(top >= start)");
+ should_not_reach_here();
+
+ bind(next);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
+ or3(t3, t2, t3);
+ cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
+ STOP("assert(top <= end)");
+ should_not_reach_here();
+
+ bind(next2);
+ and3(t3, MinObjAlignmentInBytesMask, t3);
+ cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
+ STOP("assert(aligned)");
+ should_not_reach_here();
+
+ bind(ok);
+ restore();
+ }
+#endif
+}
+
+
+void MacroAssembler::eden_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Register t2, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+){
+ // make sure arguments make sense
+ assert_different_registers(obj, var_size_in_bytes, t1, t2);
+ assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
+ assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
+
+ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ // No allocation in the shared eden.
+ ba_short(slow_case);
+ } else {
+ // get eden boundaries
+ // note: we need both top & top_addr!
+ const Register top_addr = t1;
+ const Register end = t2;
+
+ CollectedHeap* ch = Universe::heap();
+ set((intx)ch->top_addr(), top_addr);
+ intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
+ ld_ptr(top_addr, delta, end);
+ ld_ptr(top_addr, 0, obj);
+
+ // try to allocate
+ Label retry;
+ bind(retry);
+#ifdef ASSERT
+ // make sure eden top is properly aligned
+ {
+ Label L;
+ btst(MinObjAlignmentInBytesMask, obj);
+ br(Assembler::zero, false, Assembler::pt, L);
+ delayed()->nop();
+ STOP("eden top is not properly aligned");
+ bind(L);
+ }
+#endif // ASSERT
+ const Register free = end;
+ sub(end, obj, free); // compute amount of free space
+ if (var_size_in_bytes->is_valid()) {
+ // size is unknown at compile time
+ cmp(free, var_size_in_bytes);
+ br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
+ delayed()->add(obj, var_size_in_bytes, end);
+ } else {
+ // size is known at compile time
+ cmp(free, con_size_in_bytes);
+ br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
+ delayed()->add(obj, con_size_in_bytes, end);
+ }
+ // Compare obj with the value at top_addr; if still equal, swap the value of
+ // end with the value at top_addr. If not equal, read the value at top_addr
+ // into end.
+ casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+ // if someone beat us on the allocation, try again, otherwise continue
+ cmp(obj, end);
+ brx(Assembler::notEqual, false, Assembler::pn, retry);
+ delayed()->mov(end, obj); // nop if successful since obj == end
+
+#ifdef ASSERT
+ // make sure eden top is properly aligned
+ {
+ Label L;
+ const Register top_addr = t1;
+
+ set((intx)ch->top_addr(), top_addr);
+ ld_ptr(top_addr, 0, top_addr);
+ btst(MinObjAlignmentInBytesMask, top_addr);
+ br(Assembler::zero, false, Assembler::pt, L);
+ delayed()->nop();
+ STOP("eden top is not properly aligned");
+ bind(L);
+ }
+#endif // ASSERT
+ }
+}
+
+
+void MacroAssembler::tlab_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+){
+ // make sure arguments make sense
+ assert_different_registers(obj, var_size_in_bytes, t1);
+ assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
+ assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
+
+ const Register free = t1;
+
+ verify_tlab();
+
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
+
+ // calculate amount of free space
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
+ sub(free, obj, free);
+
+ Label done;
+ if (var_size_in_bytes == noreg) {
+ cmp(free, con_size_in_bytes);
+ } else {
+ cmp(free, var_size_in_bytes);
+ }
+ br(Assembler::less, false, Assembler::pn, slow_case);
+ // calculate the new top pointer
+ if (var_size_in_bytes == noreg) {
+ delayed()->add(obj, con_size_in_bytes, free);
+ } else {
+ delayed()->add(obj, var_size_in_bytes, free);
+ }
+
+ bind(done);
+
+#ifdef ASSERT
+ // make sure new free pointer is properly aligned
+ {
+ Label L;
+ btst(MinObjAlignmentInBytesMask, free);
+ br(Assembler::zero, false, Assembler::pt, L);
+ delayed()->nop();
+ STOP("updated TLAB free is not properly aligned");
+ bind(L);
+ }
+#endif // ASSERT
+
+ // update the tlab top pointer
+ st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
+ verify_tlab();
+}
+
+
+void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
+ Register top = O0;
+ Register t1 = G1;
+ Register t2 = G3;
+ Register t3 = O1;
+ assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
+ Label do_refill, discard_tlab;
+
+ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ // No allocation in the shared eden.
+ ba_short(slow_case);
+ }
+
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
+
+ // calculate amount of free space
+ sub(t1, top, t1);
+ srl_ptr(t1, LogHeapWordSize, t1);
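+ // Free space is now measured in HeapWords, so it can be compared directly
+ // against the refill waste limit loaded into t2.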
+
+ // Retain tlab and allocate object in shared space if
+ // the amount free in the tlab is too large to discard.
+ cmp(t1, t2);
+ brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
+
+ // increment waste limit to prevent getting stuck on this slow path
+ delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
+ st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
+ if (TLABStats) {
+ // increment number of slow_allocations
+ ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
+ add(t2, 1, t2);
+ stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
+ }
+ ba_short(try_eden);
+
+ bind(discard_tlab);
+ if (TLABStats) {
+ // increment number of refills
+ ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
+ add(t2, 1, t2);
+ stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
+ // accumulate wastage
+ ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
+ add(t2, t1, t2);
+ stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
+ }
+
+ // if tlab is currently allocated (top or end != null) then
+ // fill [top, end + alignment_reserve) with array object
+ br_null_short(top, Assembler::pn, do_refill);
+
+ set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
+ st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
+ // set klass to intArrayKlass
+ sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
+ add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
+ sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
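+ // t1 now holds the filler length in jint elements (HeapWords scaled by
+ // HeapWordSize/sizeof(jint)), ready for the int-array length field.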
+ st(t1, top, arrayOopDesc::length_offset_in_bytes());
+ set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
+ ld_ptr(t2, 0, t2);
+ // Store klass last: concurrent GCs assume the length is valid if the
+ // klass field is not null.
+ store_klass(t2, top);
+ verify_oop(top);
+
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
+ sub(top, t1, t1); // size of tlab's allocated portion
+ incr_allocated_bytes(t1, t2, t3);
+
+ // refill the tlab with an eden allocation
+ bind(do_refill);
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
+ sll_ptr(t1, LogHeapWordSize, t1);
+ // allocate new tlab, address returned in top
+ eden_allocate(top, t1, 0, t2, t3, slow_case);
+
+ st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
+ st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
+#ifdef ASSERT
+ // check that tlab_size (t1) is still valid
+ {
+ Label ok;
+ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
+ sll_ptr(t2, LogHeapWordSize, t2);
+ cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
+ STOP("assert(t1 == tlab_size)");
+ should_not_reach_here();
+
+ bind(ok);
+ }
+#endif // ASSERT
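+ // The new TLAB end is start + size minus the alignment reserve kept for
+ // the filler array written on the next refill.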
+ add(top, t1, top); // t1 is tlab_size
+ sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
+ st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
+ verify_tlab();
+ ba_short(retry);
+}
+
+void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+ Register t1, Register t2) {
+ // Bump total bytes allocated by this thread
+ assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
+ assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
+ // v8 support has gone the way of the dodo
+ ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
+ add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
+ stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
+}
+
+Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
+ switch (cond) {
+ // Note some conditions are synonyms for others
+ case Assembler::never: return Assembler::always;
+ case Assembler::zero: return Assembler::notZero;
+ case Assembler::lessEqual: return Assembler::greater;
+ case Assembler::less: return Assembler::greaterEqual;
+ case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
+ case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
+ case Assembler::negative: return Assembler::positive;
+ case Assembler::overflowSet: return Assembler::overflowClear;
+ case Assembler::always: return Assembler::never;
+ case Assembler::notZero: return Assembler::zero;
+ case Assembler::greater: return Assembler::lessEqual;
+ case Assembler::greaterEqual: return Assembler::less;
+ case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
+ case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
+ case Assembler::positive: return Assembler::negative;
+ case Assembler::overflowClear: return Assembler::overflowSet;
+ }
+
+ ShouldNotReachHere(); return Assembler::overflowClear;
+}
+
+void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
+ Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
+ Condition negated_cond = negate_condition(cond);
+ Label L;
+ brx(negated_cond, false, Assembler::pt, L);
+ delayed()->nop();
+ inc_counter(counter_ptr, Rtmp1, Rtmp2);
+ bind(L);
+}
+
+void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
+ AddressLiteral addrlit(counter_addr);
+ sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
+ Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
+ ld(addr, Rtmp2);
+ inc(Rtmp2);
+ st(Rtmp2, addr);
+}
+
+void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
+ inc_counter((address) counter_addr, Rtmp1, Rtmp2);
+}
+
+SkipIfEqual::SkipIfEqual(
+ MacroAssembler* masm, Register temp, const bool* flag_addr,
+ Assembler::Condition condition) {
+ _masm = masm;
+ AddressLiteral flag(flag_addr);
+ _masm->sethi(flag, temp);
+ _masm->ldub(temp, flag.low10(), temp);
+ _masm->tst(temp);
+ _masm->br(condition, false, Assembler::pt, _label);
+ _masm->delayed()->nop();
+}
+
+SkipIfEqual::~SkipIfEqual() {
+ _masm->bind(_label);
+}
+
+
+// Writes to successive stack pages until the given offset is reached, in order
+// to check for stack overflow plus shadow pages. Clobbers Rtsp and Rscratch.
+void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
+ Register Rscratch) {
+ // Use stack pointer in temp stack pointer
+ mov(SP, Rtsp);
+
+ // Bang stack for total size given plus stack shadow page size.
+ // Bang one page at a time because a large size can overflow yellow and
+ // red zones (the bang will fail but stack overflow handling can't tell that
+ // it was a stack overflow bang vs a regular segv).
+ int offset = os::vm_page_size();
+ Register Roffset = Rscratch;
+
+ Label loop;
+ bind(loop);
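+ // Rtsp holds a biased stack pointer on 64-bit SPARC, so STACK_BIAS is folded
+ // into the (negative) page displacement used by the touch below.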
+ set((-offset)+STACK_BIAS, Rscratch);
+ st(G0, Rtsp, Rscratch);
+ set(offset, Roffset);
+ sub(Rsize, Roffset, Rsize);
+ cmp(Rsize, G0);
+ br(Assembler::greater, false, Assembler::pn, loop);
+ delayed()->sub(Rtsp, Roffset, Rtsp);
+
+ // Bang down shadow pages too.
+ // The -1 because we already subtracted 1 page.
+ for (int i = 0; i< StackShadowPages-1; i++) {
+ set((-i*offset)+STACK_BIAS, Rscratch);
+ st(G0, Rtsp, Rscratch);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+#ifndef SERIALGC
+
+static address satb_log_enqueue_with_frame = NULL;
+static u_char* satb_log_enqueue_with_frame_end = NULL;
+
+static address satb_log_enqueue_frameless = NULL;
+static u_char* satb_log_enqueue_frameless_end = NULL;
+
+static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
+
+static void generate_satb_log_enqueue(bool with_frame) {
+ BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
+ CodeBuffer buf(bb);
+ MacroAssembler masm(&buf);
+
+#define __ masm.
+
+ address start = __ pc();
+ Register pre_val;
+
+ Label refill, restart;
+ if (with_frame) {
+ __ save_frame(0);
+ pre_val = I0; // Was O0 before the save.
+ } else {
+ pre_val = O0;
+ }
+
+ int satb_q_index_byte_offset =
+ in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_index());
+
+ int satb_q_buf_byte_offset =
+ in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_buf());
+
+ assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
+ in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
+ "check sizes in assembly below");
+
+ __ bind(restart);
+
+ // Load the index into the SATB buffer. PtrQueue::_index is a size_t
+ // so ld_ptr is appropriate.
+ __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
+
+ // index == 0?
+ __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
+
+ __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
+ __ sub(L0, oopSize, L0);
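+ // The byte index steps down by one oop slot per enqueue; the refill path
+ // above handles the case where it has reached zero.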
+
+ __ st_ptr(pre_val, L1, L0); // [_buf + index] := I0
+ if (!with_frame) {
+ // Use return-from-leaf
+ __ retl();
+ __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
+ } else {
+ // Not delayed.
+ __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
+ }
+ if (with_frame) {
+ __ ret();
+ __ delayed()->restore();
+ }
+ __ bind(refill);
+
+ address handle_zero =
+ CAST_FROM_FN_PTR(address,
+ &SATBMarkQueueSet::handle_zero_index_for_thread);
+ // This should be rare enough that we can afford to save all the
+ // scratch registers that the calling context might be using.
+ __ mov(G1_scratch, L0);
+ __ mov(G3_scratch, L1);
+ __ mov(G4, L2);
+ // We need the value of O0 above (for the write into the buffer), so we
+ // save and restore it.
+ __ mov(O0, L3);
+ // Since the call will overwrite O7, we save and restore that, as well.
+ __ mov(O7, L4);
+ __ call_VM_leaf(L5, handle_zero, G2_thread);
+ __ mov(L0, G1_scratch);
+ __ mov(L1, G3_scratch);
+ __ mov(L2, G4);
+ __ mov(L3, O0);
+ __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
+ __ delayed()->mov(L4, O7);
+
+ if (with_frame) {
+ satb_log_enqueue_with_frame = start;
+ satb_log_enqueue_with_frame_end = __ pc();
+ } else {
+ satb_log_enqueue_frameless = start;
+ satb_log_enqueue_frameless_end = __ pc();
+ }
+
+#undef __
+}
+
+static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
+ if (with_frame) {
+ if (satb_log_enqueue_with_frame == 0) {
+ generate_satb_log_enqueue(with_frame);
+ assert(satb_log_enqueue_with_frame != 0, "postcondition.");
+ if (G1SATBPrintStubs) {
+ tty->print_cr("Generated with-frame satb enqueue:");
+ Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
+ satb_log_enqueue_with_frame_end,
+ tty);
+ }
+ }
+ } else {
+ if (satb_log_enqueue_frameless == 0) {
+ generate_satb_log_enqueue(with_frame);
+ assert(satb_log_enqueue_frameless != 0, "postcondition.");
+ if (G1SATBPrintStubs) {
+ tty->print_cr("Generated frameless satb enqueue:");
+ Disassembler::decode((u_char*)satb_log_enqueue_frameless,
+ satb_log_enqueue_frameless_end,
+ tty);
+ }
+ }
+ }
+}
+
+void MacroAssembler::g1_write_barrier_pre(Register obj,
+ Register index,
+ int offset,
+ Register pre_val,
+ Register tmp,
+ bool preserve_o_regs) {
+ Label filtered;
+
+ if (obj == noreg) {
+ // We are not loading the previous value so make
+ // sure that we don't trash the value in pre_val
+ // with the code below.
+ assert_different_registers(pre_val, tmp);
+ } else {
+ // We will be loading the previous value
+ // in this code so...
+ assert(offset == 0 || index == noreg, "choose one");
+ assert(pre_val == noreg, "check this code");
+ }
+
+ // Is marking active?
+ if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
+ ld(G2,
+ in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_active()),
+ tmp);
+ } else {
+ guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
+ "Assumption");
+ ldsb(G2,
+ in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_active()),
+ tmp);
+ }
+
+ // Is marking active?
+ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
+
+ // Do we need to load the previous value?
+ if (obj != noreg) {
+ // Load the previous value...
+ if (index == noreg) {
+ if (Assembler::is_simm13(offset)) {
+ load_heap_oop(obj, offset, tmp);
+ } else {
+ set(offset, tmp);
+ load_heap_oop(obj, tmp, tmp);
+ }
+ } else {
+ load_heap_oop(obj, index, tmp);
+ }
+ // Previous value has been loaded into tmp
+ pre_val = tmp;
+ }
+
+ assert(pre_val != noreg, "must have a real register");
+
+ // Is the previous value null?
+ cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);
+
+ // OK, it's not filtered, so we'll need to call enqueue. In the normal
+ // case, pre_val will be a scratch G-reg, but there are some cases in
+ // which it's an O-reg. In the first case, do a normal call. In the
+ // latter, do a save here and call the frameless version.
+
+ guarantee(pre_val->is_global() || pre_val->is_out(),
+ "Or we need to think harder.");
+
+ if (pre_val->is_global() && !preserve_o_regs) {
+ generate_satb_log_enqueue_if_necessary(true); // with frame
+
+ call(satb_log_enqueue_with_frame);
+ delayed()->mov(pre_val, O0);
+ } else {
+ generate_satb_log_enqueue_if_necessary(false); // frameless
+
+ save_frame(0);
+ call(satb_log_enqueue_frameless);
+ delayed()->mov(pre_val->after_save(), O0);
+ restore();
+ }
+
+ bind(filtered);
+}
+
+static address dirty_card_log_enqueue = 0;
+static u_char* dirty_card_log_enqueue_end = 0;
+
+// This code is allowed to assume that O0 contains the object address.
+static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
+ BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
+ CodeBuffer buf(bb);
+ MacroAssembler masm(&buf);
+#define __ masm.
+ address start = __ pc();
+
+ Label not_already_dirty, restart, refill;
+
+#ifdef _LP64
+ __ srlx(O0, CardTableModRefBS::card_shift, O0);
+#else
+ __ srl(O0, CardTableModRefBS::card_shift, O0);
+#endif
+ AddressLiteral addrlit(byte_map_base);
+ __ set(addrlit, O1); // O1 := <card table base>
+ __ ldub(O0, O1, O2); // O2 := [O0 + O1]
+
+ assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+ __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
+
+ // We didn't take the branch, so we're already dirty: return.
+ // Use return-from-leaf
+ __ retl();
+ __ delayed()->nop();
+
+ // Not dirty.
+ __ bind(not_already_dirty);
+
+ // Get O0 + O1 into a reg by itself
+ __ add(O0, O1, O3);
+
+ // First, dirty it.
+ __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
+
+ int dirty_card_q_index_byte_offset =
+ in_bytes(JavaThread::dirty_card_queue_offset() +
+ PtrQueue::byte_offset_of_index());
+ int dirty_card_q_buf_byte_offset =
+ in_bytes(JavaThread::dirty_card_queue_offset() +
+ PtrQueue::byte_offset_of_buf());
+ __ bind(restart);
+
+ // Load the index into the update buffer. PtrQueue::_index is
+ // a size_t so ld_ptr is appropriate here.
+ __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
+
+ // index == 0?
+ __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
+
+ __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
+ __ sub(L0, oopSize, L0);
+
+ __ st_ptr(O3, L1, L0); // [_buf + index] := I0
+ // Use return-from-leaf
+ __ retl();
+ __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
+
+ __ bind(refill);
+ address handle_zero =
+ CAST_FROM_FN_PTR(address,
+ &DirtyCardQueueSet::handle_zero_index_for_thread);
+ // This should be rare enough that we can afford to save all the
+ // scratch registers that the calling context might be using.
+ __ mov(G1_scratch, L3);
+ __ mov(G3_scratch, L5);
+ // We need the value of O3 above (for the write into the buffer), so we
+ // save and restore it.
+ __ mov(O3, L6);
+ // Since the call will overwrite O7, we save and restore that, as well.
+ __ mov(O7, L4);
+
+ __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
+ __ mov(L3, G1_scratch);
+ __ mov(L5, G3_scratch);
+ __ mov(L6, O3);
+ __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
+ __ delayed()->mov(L4, O7);
+
+ dirty_card_log_enqueue = start;
+ dirty_card_log_enqueue_end = __ pc();
+ // XXX Should have a guarantee here about not going off the end!
+ // Does it already do so? Do an experiment...
+
+#undef __
+
+}
+
+static inline void
+generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
+ if (dirty_card_log_enqueue == 0) {
+ generate_dirty_card_log_enqueue(byte_map_base);
+ assert(dirty_card_log_enqueue != 0, "postcondition.");
+ if (G1SATBPrintStubs) {
+ tty->print_cr("Generated dirty_card enqueue:");
+ Disassembler::decode((u_char*)dirty_card_log_enqueue,
+ dirty_card_log_enqueue_end,
+ tty);
+ }
+ }
+}
+
+
+void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
+
+ Label filtered;
+ MacroAssembler* post_filter_masm = this;
+
+ if (new_val == G0) return;
+
+ G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
+ assert(bs->kind() == BarrierSet::G1SATBCT ||
+ bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
+
+ if (G1RSBarrierRegionFilter) {
+ xor3(store_addr, new_val, tmp);
+#ifdef _LP64
+ srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
+#else
+ srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
+#endif
+
+ // XXX Should I predict this taken or not? Does it matter?
+ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
+ }
+
+ // If the "store_addr" register is an "in" or "local" register, move it to
+ // a scratch reg so we can pass it as an argument.
+ bool use_scr = !(store_addr->is_global() || store_addr->is_out());
+ // Pick a scratch register different from "tmp".
+ Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
+ // Make sure we use up the delay slot!
+ if (use_scr) {
+ post_filter_masm->mov(store_addr, scr);
+ } else {
+ post_filter_masm->nop();
+ }
+ generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
+ save_frame(0);
+ call(dirty_card_log_enqueue);
+ if (use_scr) {
+ delayed()->mov(scr, O0);
+ } else {
+ delayed()->mov(store_addr->after_save(), O0);
+ }
+ restore();
+
+ bind(filtered);
+}
+
+#endif // SERIALGC
+///////////////////////////////////////////////////////////////////////////////////
+
+void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
+ // If we're writing constant NULL, we can skip the write barrier.
+ if (new_val == G0) return;
+ CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
+ assert(bs->kind() == BarrierSet::CardTableModRef ||
+ bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+ card_table_write(bs->byte_map_base, tmp, store_addr);
+}
+
+void MacroAssembler::load_klass(Register src_oop, Register klass) {
+ // The number of bytes in this code is used by
+ // MachCallDynamicJavaNode::ret_addr_offset()
+ // if this changes, change that.
+ if (UseCompressedKlassPointers) {
+ lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
+ decode_klass_not_null(klass);
+ } else {
+ ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
+ }
+}
+
+void MacroAssembler::store_klass(Register klass, Register dst_oop) {
+ if (UseCompressedKlassPointers) {
+ assert(dst_oop != klass, "not enough registers");
+ encode_klass_not_null(klass);
+ st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
+ } else {
+ st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
+ }
+}
+
+void MacroAssembler::store_klass_gap(Register s, Register d) {
+ if (UseCompressedKlassPointers) {
+ assert(s != d, "not enough registers");
+ st(s, d, oopDesc::klass_gap_offset_in_bytes());
+ }
+}
+
+void MacroAssembler::load_heap_oop(const Address& s, Register d) {
+ if (UseCompressedOops) {
+ lduw(s, d);
+ decode_heap_oop(d);
+ } else {
+ ld_ptr(s, d);
+ }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
+ if (UseCompressedOops) {
+ lduw(s1, s2, d);
+ decode_heap_oop(d, d);
+ } else {
+ ld_ptr(s1, s2, d);
+ }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
+ if (UseCompressedOops) {
+ lduw(s1, simm13a, d);
+ decode_heap_oop(d, d);
+ } else {
+ ld_ptr(s1, simm13a, d);
+ }
+}
+
+void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
+ if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
+ else load_heap_oop(s1, s2.as_register(), d);
+}
+
+void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
+ if (UseCompressedOops) {
+ assert(s1 != d && s2 != d, "not enough registers");
+ encode_heap_oop(d);
+ st(d, s1, s2);
+ } else {
+ st_ptr(d, s1, s2);
+ }
+}
+
+void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
+ if (UseCompressedOops) {
+ assert(s1 != d, "not enough registers");
+ encode_heap_oop(d);
+ st(d, s1, simm13a);
+ } else {
+ st_ptr(d, s1, simm13a);
+ }
+}
+
+void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
+ if (UseCompressedOops) {
+ assert(a.base() != d, "not enough registers");
+ encode_heap_oop(d);
+ st(d, a, offset);
+ } else {
+ st_ptr(d, a, offset);
+ }
+}
+
+
+void MacroAssembler::encode_heap_oop(Register src, Register dst) {
+ assert (UseCompressedOops, "must be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ verify_oop(src);
+ if (Universe::narrow_oop_base() == NULL) {
+ srlx(src, LogMinObjAlignmentInBytes, dst);
+ return;
+ }
+ Label done;
+ if (src == dst) {
+ // optimize for frequent case src == dst
+ bpr(rc_nz, true, Assembler::pt, src, done);
+ delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken
+ bind(done);
+ srlx(src, LogMinObjAlignmentInBytes, dst);
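+ // src == dst here: a null value skips the annulled sub and shifts to zero,
+ // while a non-null value shifts the already-subtracted (src - base).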
+ } else {
+ bpr(rc_z, false, Assembler::pn, src, done);
+ delayed() -> mov(G0, dst);
+ // could be moved before the branch and annul the delay slot,
+ // but that may add some unneeded work when decoding null
+ sub(src, G6_heapbase, dst);
+ srlx(dst, LogMinObjAlignmentInBytes, dst);
+ bind(done);
+ }
+}
+
+
+void MacroAssembler::encode_heap_oop_not_null(Register r) {
+ assert (UseCompressedOops, "must be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ verify_oop(r);
+ if (Universe::narrow_oop_base() != NULL)
+ sub(r, G6_heapbase, r);
+ srlx(r, LogMinObjAlignmentInBytes, r);
+}
+
+void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
+ assert (UseCompressedOops, "must be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ verify_oop(src);
+ if (Universe::narrow_oop_base() == NULL) {
+ srlx(src, LogMinObjAlignmentInBytes, dst);
+ } else {
+ sub(src, G6_heapbase, dst);
+ srlx(dst, LogMinObjAlignmentInBytes, dst);
+ }
+}
+
+// Same algorithm as oops.inline.hpp decode_heap_oop.
+void MacroAssembler::decode_heap_oop(Register src, Register dst) {
+ assert (UseCompressedOops, "must be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ sllx(src, LogMinObjAlignmentInBytes, dst);
+ if (Universe::narrow_oop_base() != NULL) {
+ Label done;
+ bpr(rc_nz, true, Assembler::pt, dst, done);
+ delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken
+ bind(done);
+ }
+ verify_oop(dst);
+}
+
+void MacroAssembler::decode_heap_oop_not_null(Register r) {
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ assert (UseCompressedOops, "must be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ sllx(r, LogMinObjAlignmentInBytes, r);
+ if (Universe::narrow_oop_base() != NULL)
+ add(r, G6_heapbase, r);
+}
+
+void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ assert (UseCompressedOops, "must be compressed");
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ sllx(src, LogMinObjAlignmentInBytes, dst);
+ if (Universe::narrow_oop_base() != NULL)
+ add(dst, G6_heapbase, dst);
+}
+
+void MacroAssembler::encode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ if (Universe::narrow_klass_base() != NULL)
+ sub(r, G6_heapbase, r);
+ srlx(r, LogKlassAlignmentInBytes, r);
+}
+
+void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ if (Universe::narrow_klass_base() == NULL) {
+ srlx(src, LogKlassAlignmentInBytes, dst);
+ } else {
+ sub(src, G6_heapbase, dst);
+ srlx(dst, LogKlassAlignmentInBytes, dst);
+ }
+}
+
+void MacroAssembler::decode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ sllx(r, LogKlassAlignmentInBytes, r);
+ if (Universe::narrow_klass_base() != NULL)
+ add(r, G6_heapbase, r);
+}
+
+void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Do not add assert code to this unless you change vtableStubs_sparc.cpp
+ // pd_code_size_limit.
+ assert (UseCompressedKlassPointers, "must be compressed");
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ sllx(src, LogKlassAlignmentInBytes, dst);
+ if (Universe::narrow_klass_base() != NULL)
+ add(dst, G6_heapbase, dst);
+}
+
+void MacroAssembler::reinit_heapbase() {
+ if (UseCompressedOops || UseCompressedKlassPointers) {
+ AddressLiteral base(Universe::narrow_ptrs_base_addr());
+ load_ptr_contents(base, G6_heapbase);
+ }
+}
+
+// Compare char[] arrays aligned to 4 bytes.
+void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
+ Register limit, Register result,
+ Register chr1, Register chr2, Label& Ldone) {
+ Label Lvector, Lloop;
+ assert(chr1 == result, "should be the same");
+
+ // Note: limit contains number of bytes (2*char_elements) != 0.
+ andcc(limit, 0x2, chr1); // trailing character ?
+ br(Assembler::zero, false, Assembler::pt, Lvector);
+ delayed()->nop();
+
+ // compare the trailing char
+ sub(limit, sizeof(jchar), limit);
+ lduh(ary1, limit, chr1);
+ lduh(ary2, limit, chr2);
+ cmp(chr1, chr2);
+ br(Assembler::notEqual, true, Assembler::pt, Ldone);
+ delayed()->mov(G0, result); // not equal
+
+ // only one char ?
+ cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
+ delayed()->add(G0, 1, result); // zero-length arrays are equal
+
+  // word by word compare, don't need alignment check
+ bind(Lvector);
+ // Shift ary1 and ary2 to the end of the arrays, negate limit
+ add(ary1, limit, ary1);
+ add(ary2, limit, ary2);
+ neg(limit, limit);
+
+ lduw(ary1, limit, chr1);
+ bind(Lloop);
+ lduw(ary2, limit, chr2);
+ cmp(chr1, chr2);
+ br(Assembler::notEqual, true, Assembler::pt, Ldone);
+ delayed()->mov(G0, result); // not equal
+ inccc(limit, 2*sizeof(jchar));
+ // annul LDUW if branch is not taken to prevent access past end of array
+ br(Assembler::notZero, true, Assembler::pt, Lloop);
+ delayed()->lduw(ary1, limit, chr1); // hoisted
+
+ // Caller should set it:
+ // add(G0, 1, result); // equals
+}
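+
+// For reference, the sequence above corresponds roughly to the following C
+// loop (illustrative sketch; 'limit' is the byte length, i.e. 2*char_elements,
+// and a1/a2 are the byte addresses of the two arrays):
+//
+//   if (limit & 2) {                                    // odd trailing char first
+//     limit -= sizeof(jchar);
+//     if (*(jchar*)(a1 + limit) != *(jchar*)(a2 + limit)) return 0;
+//     if (limit == 0) return 1;                         // single char, and it matched
+//   }
+//   for (int i = 0; i < limit; i += 2 * sizeof(jchar))  // then two chars at a time
+//     if (*(jint*)(a1 + i) != *(jint*)(a2 + i)) return 0;
+//   return 1;                                           // caller emits the 'equal' result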
+
+// Use BIS for zeroing (count is in bytes).
+void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
+ assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
+ Register end = count;
+ int cache_line_size = VM_Version::prefetch_data_size();
+ // Minimum count when BIS zeroing can be used since
+ // it needs membar which is expensive.
+ int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
+
+ Label small_loop;
+ // Check if count is negative (dead code) or zero.
+  // Note: count is 64-bit in a 64-bit VM.
+ cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
+
+ // Use BIS zeroing only for big arrays since it requires membar.
+ if (Assembler::is_simm13(block_zero_size)) { // < 4096
+ cmp(count, block_zero_size);
+ } else {
+ set(block_zero_size, temp);
+ cmp(count, temp);
+ }
+ br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
+ delayed()->add(to, count, end);
+
+ // Note: size is >= three (32 bytes) cache lines.
+
+ // Clean the beginning of space up to next cache line.
+ for (int offs = 0; offs < cache_line_size; offs += 8) {
+ stx(G0, to, offs);
+ }
+
+ // align to next cache line
+ add(to, cache_line_size, to);
+ and3(to, -cache_line_size, to);
+
+ // Note: size left >= two (32 bytes) cache lines.
+
+ // BIS should not be used to zero tail (64 bytes)
+ // to avoid zeroing a header of the following object.
+ sub(end, (cache_line_size*2)-8, end);
+
+ Label bis_loop;
+ bind(bis_loop);
+ stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
+ add(to, cache_line_size, to);
+ cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
+
+ // BIS needs membar.
+ membar(Assembler::StoreLoad);
+
+ add(end, (cache_line_size*2)-8, end); // restore end
+ cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
+
+ // Clean the tail.
+ bind(small_loop);
+ stx(G0, to, 0);
+ add(to, 8, to);
+ cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
+ nop(); // Separate short branches
+}
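+
+// For reference, the partitioning performed above is roughly (illustrative
+// sketch; 'to' and 'count' are in bytes and 'line' is the cache line size):
+//
+//   if (count < block_zero_size) goto small_loop;       // plain 8-byte stx loop
+//   end = to + count;
+//   zero one line of 8-byte words, then align 'to' up to a line boundary;
+//   while (to < end - (2*line - 8)) { BIS-store a line at 'to'; to += line; }
+//   membar(StoreLoad);                                   // required after BIS stores
+//   small_loop: zero 8 bytes at a time until 'to' reaches 'end';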
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,1504 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
+#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
+
+#include "asm/assembler.hpp"
+
+// <sys/trap.h> promises that the system will not use traps 16-31
+#define ST_RESERVED_FOR_USER_0 0x10
+
+class BiasedLockingCounters;
+
+
+// Register aliases for parts of the system:
+
+// 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
+// across context switches in V8+ ABI. Of course, there are no 64 bit regs
+// in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
+
+// g2-g4 are scratch registers called "application globals". Their
+// meaning is reserved to the "compilation system"--which means us!
+// They are not supposed to be touched by ordinary C code, although
+// highly-optimized C code might steal them for temps. They are safe
+// across thread switches, and the ABI requires that they be safe
+// across function calls.
+//
+// g1 and g3 are touched by more modules. V8 allows g1 to be clobbered
+// across func calls, and V8+ also allows g5 to be clobbered across
+// func calls. Also, g1 and g5 can get touched while doing shared
+// library loading.
+//
+// We must not touch g7 (it is the thread-self register) and g6 is
+// reserved for certain tools. g0, of course, is always zero.
+//
+// (Sources: SunSoft Compilers Group, thread library engineers.)
+
+// %%%% The interpreter should be revisited to reduce global scratch regs.
+
+// This global always holds the current JavaThread pointer:
+
+REGISTER_DECLARATION(Register, G2_thread , G2);
+REGISTER_DECLARATION(Register, G6_heapbase , G6);
+
+// The following globals are part of the Java calling convention:
+
+REGISTER_DECLARATION(Register, G5_method , G5);
+REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
+REGISTER_DECLARATION(Register, G5_inline_cache_reg , G5_method);
+
+// The following globals are used for the new C1 & interpreter calling convention:
+REGISTER_DECLARATION(Register, Gargs , G4); // pointing to the last argument
+
+// This local is used to preserve G2_thread in the interpreter and in stubs:
+REGISTER_DECLARATION(Register, L7_thread_cache , L7);
+
+// These globals are used as scratch registers in the interpreter:
+
+REGISTER_DECLARATION(Register, Gframe_size , G1); // SAME REG as G1_scratch
+REGISTER_DECLARATION(Register, G1_scratch , G1); // also SAME
+REGISTER_DECLARATION(Register, G3_scratch , G3);
+REGISTER_DECLARATION(Register, G4_scratch , G4);
+
+// These globals are used as short-lived scratch registers in the compiler:
+
+REGISTER_DECLARATION(Register, Gtemp , G5);
+
+// JSR 292 fixed register usages:
+REGISTER_DECLARATION(Register, G5_method_type , G5);
+REGISTER_DECLARATION(Register, G3_method_handle , G3);
+REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
+
+// The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
+// because a single patchable "set" instruction (NativeMovConstReg,
+// or NativeMovConstPatching for compiler1)
+// serves to set up either quantity, depending on whether the compiled
+// call site is an inline cache or is megamorphic. See the function
+// CompiledIC::set_to_megamorphic.
+//
+// If an inline cache targets an interpreted method, then the
+// G5 register will be used twice during the call. First,
+// the call site will be patched to load a compiledICHolder
+// into G5. (This is an ordered pair of ic_klass, method.)
+// The c2i adapter will first check the ic_klass, then load
+// G5_method with the method part of the pair just before
+// jumping into the interpreter.
+//
+// Note that G5_method is only the method-self for the interpreter,
+// and is logically unrelated to G5_megamorphic_method.
+//
+// Invariants on G2_thread (the JavaThread pointer):
+// - it should not be used for any other purpose anywhere
+// - it must be re-initialized by StubRoutines::call_stub()
+// - it must be preserved around every use of call_VM
+
+// We can consider using g2/g3/g4 to cache more values than the
+// JavaThread, such as the card-marking base or perhaps pointers into
+// Eden. It's something of a waste to use them as scratch temporaries,
+// since they are not supposed to be volatile. (Of course, if we find
+// that Java doesn't benefit from application globals, then we can just
+// use them as ordinary temporaries.)
+//
+// Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
+// it makes sense to use them routinely for procedure linkage,
+// whenever the On registers are not applicable. Examples: G5_method,
+// G5_inline_cache_klass, and a double handful of miscellaneous compiler
+// stubs. This means that compiler stubs, etc., should be kept to a
+// maximum of two or three G-register arguments.
+
+
+// stub frames
+
+REGISTER_DECLARATION(Register, Lentry_args , L0); // pointer to args passed to callee (interpreter) not stub itself
+
+// Interpreter frames
+
+#ifdef CC_INTERP
+REGISTER_DECLARATION(Register, Lstate , L0); // interpreter state object pointer
+REGISTER_DECLARATION(Register, L1_scratch , L1); // scratch
+REGISTER_DECLARATION(Register, Lmirror , L1); // mirror (for native methods only)
+REGISTER_DECLARATION(Register, L2_scratch , L2);
+REGISTER_DECLARATION(Register, L3_scratch , L3);
+REGISTER_DECLARATION(Register, L4_scratch , L4);
+REGISTER_DECLARATION(Register, Lscratch , L5); // C1 uses
+REGISTER_DECLARATION(Register, Lscratch2 , L6); // C1 uses
+REGISTER_DECLARATION(Register, L7_scratch , L7); // constant pool cache
+REGISTER_DECLARATION(Register, O5_savedSP , O5);
+REGISTER_DECLARATION(Register, I5_savedSP     , I5); // Saved SP before bumping for locals. This is simply
+                                                     // a copy of SP, so in 64-bit it's a biased value. The bias
+                                                     // is added and removed as needed in the frame code.
+// Interface to signature handler
+REGISTER_DECLARATION(Register, Llocals , L7); // pointer to locals for signature handler
+REGISTER_DECLARATION(Register, Lmethod , L6); // Method* when calling signature handler
+
+#else
+REGISTER_DECLARATION(Register, Lesp , L0); // expression stack pointer
+REGISTER_DECLARATION(Register, Lbcp , L1); // pointer to next bytecode
+REGISTER_DECLARATION(Register, Lmethod , L2);
+REGISTER_DECLARATION(Register, Llocals , L3);
+REGISTER_DECLARATION(Register, Largs , L3); // pointer to locals for signature handler
+ // must match Llocals in asm interpreter
+REGISTER_DECLARATION(Register, Lmonitors , L4);
+REGISTER_DECLARATION(Register, Lbyte_code , L5);
+// When calling out from the interpreter we record SP so that we can remove any extra stack
+// space allocated during adapter transitions. This register is only live from the point
+// of the call until we return.
+REGISTER_DECLARATION(Register, Llast_SP , L5);
+REGISTER_DECLARATION(Register, Lscratch , L5);
+REGISTER_DECLARATION(Register, Lscratch2 , L6);
+REGISTER_DECLARATION(Register, LcpoolCache , L6); // constant pool cache
+
+REGISTER_DECLARATION(Register, O5_savedSP , O5);
+REGISTER_DECLARATION(Register, I5_savedSP     , I5); // Saved SP before bumping for locals. This is simply
+                                                     // a copy of SP, so in 64-bit it's a biased value. The bias
+                                                     // is added and removed as needed in the frame code.
+REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
+REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
+REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
+#endif /* CC_INTERP */
+
+// NOTE: Lscratch2 and LcpoolCache point to the same registers in
+// the interpreter code. If Lscratch2 needs to be used for some
+// purpose, then LcpoolCache should be restored afterwards for
+// the interpreter to work right.
+// (These assignments must be compatible with L7_thread_cache; see above.)
+
+// Since Lbcp points into the middle of the method object,
+// it is temporarily converted into a "bcx" during GC.
+
+// Exception processing
+// These registers are passed into exception handlers.
+// All exception handlers require the exception object being thrown.
+// In addition, an nmethod's exception handler must be passed
+// the address of the call site within the nmethod, to allow
+// proper selection of the applicable catch block.
+// (Interpreter frames use their own bcp() for this purpose.)
+//
+// The Oissuing_pc value is not always needed. When jumping to a
+// handler that is known to be interpreted, the Oissuing_pc value can be
+// omitted. An actual catch block in compiled code receives (from its
+// nmethod's exception handler) the thrown exception in the Oexception,
+// but it doesn't need the Oissuing_pc.
+//
+// If an exception handler (either interpreted or compiled)
+// discovers there is no applicable catch block, it updates
+// the Oissuing_pc to the continuation PC of its own caller,
+// pops back to that caller's stack frame, and executes that
+// caller's exception handler. Obviously, this process will
+// iterate until the control stack is popped back to a method
+// containing an applicable catch block. A key invariant is
+// that the Oissuing_pc value is always a value local to
+// the method whose exception handler is currently executing.
+//
+// Note: The issuing PC value is __not__ a raw return address (I7 value).
+// It is a "return pc", the address __following__ the call.
+// Raw return addresses are converted to issuing PCs by frame::pc(),
+// or by stubs. Issuing PCs can be used directly with PC range tables.
+//
+REGISTER_DECLARATION(Register, Oexception , O0); // exception being thrown
+REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
+
+
+// These must occur after the declarations above
+#ifndef DONT_USE_REGISTER_DEFINES
+
+#define Gthread AS_REGISTER(Register, Gthread)
+#define Gmethod AS_REGISTER(Register, Gmethod)
+#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
+#define Ginline_cache_reg AS_REGISTER(Register, Ginline_cache_reg)
+#define Gargs AS_REGISTER(Register, Gargs)
+#define Lthread_cache AS_REGISTER(Register, Lthread_cache)
+#define Gframe_size AS_REGISTER(Register, Gframe_size)
+#define Gtemp AS_REGISTER(Register, Gtemp)
+
+#ifdef CC_INTERP
+#define Lstate AS_REGISTER(Register, Lstate)
+#define Lesp AS_REGISTER(Register, Lesp)
+#define L1_scratch AS_REGISTER(Register, L1_scratch)
+#define Lmirror AS_REGISTER(Register, Lmirror)
+#define L2_scratch AS_REGISTER(Register, L2_scratch)
+#define L3_scratch AS_REGISTER(Register, L3_scratch)
+#define L4_scratch AS_REGISTER(Register, L4_scratch)
+#define Lscratch AS_REGISTER(Register, Lscratch)
+#define Lscratch2 AS_REGISTER(Register, Lscratch2)
+#define L7_scratch AS_REGISTER(Register, L7_scratch)
+#define Ostate AS_REGISTER(Register, Ostate)
+#else
+#define Lesp AS_REGISTER(Register, Lesp)
+#define Lbcp AS_REGISTER(Register, Lbcp)
+#define Lmethod AS_REGISTER(Register, Lmethod)
+#define Llocals AS_REGISTER(Register, Llocals)
+#define Lmonitors AS_REGISTER(Register, Lmonitors)
+#define Lbyte_code AS_REGISTER(Register, Lbyte_code)
+#define Lscratch AS_REGISTER(Register, Lscratch)
+#define Lscratch2 AS_REGISTER(Register, Lscratch2)
+#define LcpoolCache AS_REGISTER(Register, LcpoolCache)
+#endif /* ! CC_INTERP */
+
+#define Lentry_args AS_REGISTER(Register, Lentry_args)
+#define I5_savedSP AS_REGISTER(Register, I5_savedSP)
+#define O5_savedSP AS_REGISTER(Register, O5_savedSP)
+#define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
+#define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
+#define IdispatchTables AS_REGISTER(Register, IdispatchTables)
+
+#define Oexception AS_REGISTER(Register, Oexception)
+#define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
+
+#endif
+
+
+// Address is an abstraction used to represent a memory location.
+//
+// Note: A register location is represented via a Register, not
+// via an address for efficiency & simplicity reasons.
+
+class Address VALUE_OBJ_CLASS_SPEC {
+ private:
+ Register _base; // Base register.
+ RegisterOrConstant _index_or_disp; // Index register or constant displacement.
+ RelocationHolder _rspec;
+
+ public:
+ Address() : _base(noreg), _index_or_disp(noreg) {}
+
+ Address(Register base, RegisterOrConstant index_or_disp)
+ : _base(base),
+ _index_or_disp(index_or_disp) {
+ }
+
+ Address(Register base, Register index)
+ : _base(base),
+ _index_or_disp(index) {
+ }
+
+ Address(Register base, int disp)
+ : _base(base),
+ _index_or_disp(disp) {
+ }
+
+#ifdef ASSERT
+ // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+ Address(Register base, ByteSize disp)
+ : _base(base),
+ _index_or_disp(in_bytes(disp)) {
+ }
+#endif
+
+ // accessors
+ Register base() const { return _base; }
+ Register index() const { return _index_or_disp.as_register(); }
+ int disp() const { return _index_or_disp.as_constant(); }
+
+ bool has_index() const { return _index_or_disp.is_register(); }
+ bool has_disp() const { return _index_or_disp.is_constant(); }
+
+ bool uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
+
+ const relocInfo::relocType rtype() { return _rspec.type(); }
+ const RelocationHolder& rspec() { return _rspec; }
+
+ RelocationHolder rspec(int offset) const {
+ return offset == 0 ? _rspec : _rspec.plus(offset);
+ }
+
+ inline bool is_simm13(int offset = 0); // check disp+offset for overflow
+
+ Address plus_disp(int plusdisp) const { // bump disp by a small amount
+ assert(_index_or_disp.is_constant(), "must have a displacement");
+ Address a(base(), disp() + plusdisp);
+ return a;
+ }
+ bool is_same_address(Address a) const {
+ // disregard _rspec
+ return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
+ }
+
+ Address after_save() const {
+ Address a = (*this);
+ a._base = a._base->after_save();
+ return a;
+ }
+
+ Address after_restore() const {
+ Address a = (*this);
+ a._base = a._base->after_restore();
+ return a;
+ }
+
+ // Convert the raw encoding form into the form expected by the
+ // constructor for Address.
+ static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
+
+ friend class Assembler;
+};
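+
+// Typical uses (illustrative sketch; the R* register names are placeholders):
+//
+//   Address field(Rreceiver, oopDesc::klass_offset_in_bytes()); // base + constant displacement
+//   Address elem (Rarray, Rindex);                              // base + index register
+//   __ ld_ptr(field, Rklass);
+//   __ ld_ptr(elem.base(), elem.index(), Rvalue);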
+
+
+class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+ private:
+ address _address;
+ RelocationHolder _rspec;
+
+ RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
+ switch (rtype) {
+ case relocInfo::external_word_type:
+ return external_word_Relocation::spec(addr);
+ case relocInfo::internal_word_type:
+ return internal_word_Relocation::spec(addr);
+#ifdef _LP64
+ case relocInfo::opt_virtual_call_type:
+ return opt_virtual_call_Relocation::spec();
+ case relocInfo::static_call_type:
+ return static_call_Relocation::spec();
+ case relocInfo::runtime_call_type:
+ return runtime_call_Relocation::spec();
+#endif
+ case relocInfo::none:
+ return RelocationHolder();
+ default:
+ ShouldNotReachHere();
+ return RelocationHolder();
+ }
+ }
+
+ protected:
+ // creation
+ AddressLiteral() : _address(NULL), _rspec(NULL) {}
+
+ public:
+ AddressLiteral(address addr, RelocationHolder const& rspec)
+ : _address(addr),
+ _rspec(rspec) {}
+
+ // Some constructors to avoid casting at the call site.
+ AddressLiteral(jobject obj, RelocationHolder const& rspec)
+ : _address((address) obj),
+ _rspec(rspec) {}
+
+ AddressLiteral(intptr_t value, RelocationHolder const& rspec)
+ : _address((address) value),
+ _rspec(rspec) {}
+
+ AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ // Some constructors to avoid casting at the call site.
+ AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+#ifdef _LP64
+ // 32-bit complains about a multiple declaration for int*.
+ AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+#endif
+
+ AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(Metadata** addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
+ : _address((address) addr),
+ _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+ intptr_t value() const { return (intptr_t) _address; }
+ int low10() const;
+
+ const relocInfo::relocType rtype() const { return _rspec.type(); }
+ const RelocationHolder& rspec() const { return _rspec; }
+
+ RelocationHolder rspec(int offset) const {
+ return offset == 0 ? _rspec : _rspec.plus(offset);
+ }
+};
+
+// Convenience classes
+class ExternalAddress: public AddressLiteral {
+ private:
+ static relocInfo::relocType reloc_for_target(address target) {
+ // Sometimes ExternalAddress is used for values which aren't
+ // exactly addresses, like the card table base.
+ // external_word_type can't be used for values in the first page
+ // so just skip the reloc in that case.
+ return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
+ }
+
+ public:
+ ExternalAddress(address target) : AddressLiteral(target, reloc_for_target( target)) {}
+ ExternalAddress(Metadata** target) : AddressLiteral(target, reloc_for_target((address) target)) {}
+};
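+
+// Typical use (illustrative sketch; 'some_global', 'Rtemp' and 'Rdest' are
+// placeholders):
+//
+//   AddressLiteral lit((address)&some_global);
+//   __ set(lit, Rtemp);              // materialize the address in a register
+//   __ load_contents(lit, Rdest);    // or load the pointed-to word directly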
+
+inline Address RegisterImpl::address_in_saved_window() const {
+ return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
+}
+
+
+
+// Argument is an abstraction used to represent an outgoing
+// actual argument or an incoming formal parameter, whether
+// it resides in memory or in a register, in a manner consistent
+// with the SPARC Application Binary Interface, or ABI. This is
+// often referred to as the native or C calling convention.
+
+class Argument VALUE_OBJ_CLASS_SPEC {
+ private:
+ int _number;
+ bool _is_in;
+
+ public:
+#ifdef _LP64
+ enum {
+ n_register_parameters = 6, // only 6 registers may contain integer parameters
+ n_float_register_parameters = 16 // Can have up to 16 floating registers
+ };
+#else
+ enum {
+ n_register_parameters = 6 // only 6 registers may contain integer parameters
+ };
+#endif
+
+ // creation
+ Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
+
+ int number() const { return _number; }
+ bool is_in() const { return _is_in; }
+ bool is_out() const { return !is_in(); }
+
+ Argument successor() const { return Argument(number() + 1, is_in()); }
+ Argument as_in() const { return Argument(number(), true ); }
+ Argument as_out() const { return Argument(number(), false); }
+
+ // locating register-based arguments:
+ bool is_register() const { return _number < n_register_parameters; }
+
+#ifdef _LP64
+ // locating Floating Point register-based arguments:
+ bool is_float_register() const { return _number < n_float_register_parameters; }
+
+ FloatRegister as_float_register() const {
+ assert(is_float_register(), "must be a register argument");
+ return as_FloatRegister(( number() *2 ) + 1);
+ }
+ FloatRegister as_double_register() const {
+ assert(is_float_register(), "must be a register argument");
+ return as_FloatRegister(( number() *2 ));
+ }
+#endif
+
+ Register as_register() const {
+ assert(is_register(), "must be a register argument");
+ return is_in() ? as_iRegister(number()) : as_oRegister(number());
+ }
+
+ // locating memory-based arguments
+ Address as_address() const {
+ assert(!is_register(), "must be a memory argument");
+ return address_in_frame();
+ }
+
+ // When applied to a register-based argument, give the corresponding address
+ // into the 6-word area "into which callee may store register arguments"
+ // (This is a different place than the corresponding register-save area location.)
+ Address address_in_frame() const;
+
+ // debugging
+ const char* name() const;
+
+ friend class Assembler;
+};
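+
+// Typical use when marshalling outgoing native-call arguments (illustrative
+// sketch; 'Rsrc' is a placeholder register):
+//
+//   Argument arg(0, false);                  // first outgoing argument
+//   if (arg.is_register()) __ mov(Rsrc, arg.as_register());
+//   else                   __ st_ptr(Rsrc, arg.as_address());
+//   arg = arg.successor();                   // advance to the next argument slot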
+
+
+class RegistersForDebugging : public StackObj {
+ public:
+ intptr_t i[8], l[8], o[8], g[8];
+ float f[32];
+ double d[32];
+
+ void print(outputStream* s);
+
+ static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
+ static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
+ static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
+ static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
+ static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
+ static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
+
+ // gen asm code to save regs
+ static void save_registers(MacroAssembler* a);
+
+ // restore global registers in case C code disturbed them
+ static void restore_registers(MacroAssembler* a, Register r);
+};
+
+
+// MacroAssembler extends Assembler by a few frequently used macros.
+//
+// Most of the standard SPARC synthetic ops are defined here.
+// Instructions for which a 'better' code sequence exists depending
+// on arguments should also go in here.
+
+#define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
+#define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
+#define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__)
+#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
+
+
+class MacroAssembler : public Assembler {
+ // code patchers need various routines like inv_wdisp()
+ friend class NativeInstruction;
+ friend class NativeGeneralJump;
+ friend class Relocation;
+ friend class Label;
+
+ protected:
+ static void print_instruction(int inst);
+ static int patched_branch(int dest_pos, int inst, int inst_pos);
+ static int branch_destination(int inst, int pos);
+
+ // Support for VM calls
+ // This is the base routine called by the different versions of call_VM_leaf. The interpreter
+ // may customize this version by overriding it for its purposes (e.g., to save/restore
+ // additional registers when doing a VM call).
+#ifdef CC_INTERP
+ #define VIRTUAL
+#else
+ #define VIRTUAL virtual
+#endif
+
+ VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
+
+ //
+ // It is imperative that all calls into the VM are handled via the call_VM macros.
+ // They make sure that the stack linkage is setup correctly. call_VM's correspond
+ // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
+ //
+ // This is the base routine called by the different versions of call_VM. The interpreter
+ // may customize this version by overriding it for its purposes (e.g., to save/restore
+ // additional registers when doing a VM call).
+ //
+ // A non-volatile java_thread_cache register should be specified so
+ // that the G2_thread value can be preserved across the call.
+ // (If java_thread_cache is noreg, then a slow get_thread call
+ // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
+ // thread.
+ //
+// If no last_java_sp is specified (noreg) then SP will be used instead.
+
+ virtual void call_VM_base(
+ Register oop_result, // where an oop-result ends up if any; use noreg otherwise
+ Register java_thread_cache, // the thread if computed before ; use noreg otherwise
+ Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
+ address entry_point, // the entry point
+ int number_of_arguments, // the number of arguments (w/o thread) to pop after call
+ bool check_exception=true // flag which indicates if exception should be checked
+ );
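+
+  // Typical use (illustrative sketch; 'SomeRuntime::entry' stands for a real
+  // VM entry point):
+  //
+  //   __ call_VM(O0, CAST_FROM_FN_PTR(address, SomeRuntime::entry), O1);
+  //
+  // The thread pointer is passed implicitly as the first C argument, and the
+  // oop return value (if any) is made visible to the GC before it is returned.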
+
+ // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+ // The implementation is only non-empty for the InterpreterMacroAssembler,
+  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+ virtual void check_and_handle_popframe(Register scratch_reg);
+ virtual void check_and_handle_earlyret(Register scratch_reg);
+
+ public:
+ MacroAssembler(CodeBuffer* code) : Assembler(code) {}
+
+ // Support for NULL-checks
+ //
+ // Generates code that causes a NULL OS exception if the content of reg is NULL.
+ // If the accessed location is M[reg + offset] and the offset is known, provide the
+ // offset. No explicit code generation is needed if the offset is within a certain
+ // range (0 <= offset <= page_size).
+ //
+ // %%%%%% Currently not done for SPARC
+
+ void null_check(Register reg, int offset = -1);
+ static bool needs_explicit_null_check(intptr_t offset);
+
+ // support for delayed instructions
+ MacroAssembler* delayed() { Assembler::delayed(); return this; }
+
+  // branches that use the right instruction for V8 vs. V9
+ inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
+ inline void br( Condition c, bool a, Predict p, Label& L );
+
+ inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
+ inline void fb( Condition c, bool a, Predict p, Label& L );
+
+ // compares register with zero (32 bit) and branches (V9 and V8 instructions)
+ void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn );
+ // Compares a pointer register with zero and branches on (not)null.
+ // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
+ void br_null ( Register s1, bool a, Predict p, Label& L );
+ void br_notnull( Register s1, bool a, Predict p, Label& L );
+
+ //
+ // Compare registers and branch with nop in delay slot or cbcond without delay slot.
+ //
+  // ATTENTION: use these instructions with caution because the cbcond instruction
+  // has a very short branch distance: 512 instructions (2 KB).
+
+ // Compare integer (32 bit) values (icc only).
+ void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L);
+ void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
+  // Platform-dependent version for pointer compare (icc on !LP64 and xcc on LP64).
+ void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L);
+ void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
+
+  // Short branch versions for comparing a pointer with zero.
+ void br_null_short ( Register s1, Predict p, Label& L );
+ void br_notnull_short( Register s1, Predict p, Label& L );
+
+ // unconditional short branch
+ void ba_short(Label& L);
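+
+  // Typical use (illustrative sketch; 'Rcount' and 'Ldone' are placeholders):
+  //
+  //   __ cmp_and_br_short(Rcount, 0, Assembler::equal, Assembler::pn, Ldone);
+  //
+  // This emits either a compare and branch with a nop in the delay slot, or a
+  // cbcond without one, so keep the target within the short distance noted above.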
+
+ inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
+ inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
+
+ // Branch that tests xcc in LP64 and icc in !LP64
+ inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
+ inline void brx( Condition c, bool a, Predict p, Label& L );
+
+ // unconditional branch
+ inline void ba( Label& L );
+
+ // Branch that tests fp condition codes
+ inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
+ inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
+
+ // get PC the best way
+ inline int get_pc( Register d );
+
+  // Sparc shorthands (pp 85, V8 manual; pp 289, V9 manual)
+ inline void cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
+ inline void cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
+
+ inline void jmp( Register s1, Register s2 );
+ inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
+
+ // Check if the call target is out of wdisp30 range (relative to the code cache)
+ static inline bool is_far_target(address d);
+ inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
+ inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
+ inline void callr( Register s1, Register s2 );
+ inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
+
+ // Emits nothing on V8
+ inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
+ inline void iprefetch( Label& L);
+
+ inline void tst( Register s ) { orcc( G0, s, G0 ); }
+
+#ifdef PRODUCT
+ inline void ret( bool trace = TraceJumps ) { if (trace) {
+ mov(I7, O7); // traceable register
+ JMP(O7, 2 * BytesPerInstWord);
+ } else {
+ jmpl( I7, 2 * BytesPerInstWord, G0 );
+ }
+ }
+
+ inline void retl( bool trace = TraceJumps ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
+ else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
+#else
+ void ret( bool trace = TraceJumps );
+ void retl( bool trace = TraceJumps );
+#endif /* PRODUCT */
+
+ // Required platform-specific helpers for Label::patch_instructions.
+ // They _shadow_ the declarations in AbstractAssembler, which are undefined.
+ void pd_patch_instruction(address branch, address target);
+#ifndef PRODUCT
+ static void pd_print_patched_instruction(address branch);
+#endif
+
+ // sethi Macro handles optimizations and relocations
+private:
+ void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
+public:
+ void sethi(const AddressLiteral& addrlit, Register d);
+ void patchable_sethi(const AddressLiteral& addrlit, Register d);
+
+ // compute the number of instructions for a sethi/set
+ static int insts_for_sethi( address a, bool worst_case = false );
+ static int worst_case_insts_for_set();
+
+ // set may be either setsw or setuw (high 32 bits may be zero or sign)
+private:
+ void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
+ static int insts_for_internal_set(intptr_t value);
+public:
+ void set(const AddressLiteral& addrlit, Register d);
+ void set(intptr_t value, Register d);
+ void set(address addr, Register d, RelocationHolder const& rspec);
+ static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }
+
+ void patchable_set(const AddressLiteral& addrlit, Register d);
+ void patchable_set(intptr_t value, Register d);
+ void set64(jlong value, Register d, Register tmp);
+ static int insts_for_set64(jlong value);
+
+ // sign-extend 32 to 64
+ inline void signx( Register s, Register d ) { sra( s, G0, d); }
+ inline void signx( Register d ) { sra( d, G0, d); }
+
+ inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
+ inline void not1( Register d ) { xnor( d, G0, d ); }
+
+ inline void neg( Register s, Register d ) { sub( G0, s, d ); }
+ inline void neg( Register d ) { sub( G0, d, d ); }
+
+ inline void cas( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
+ inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
+ // Functions for isolating 64 bit atomic swaps for LP64
+ // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
+ inline void cas_ptr( Register s1, Register s2, Register d) {
+#ifdef _LP64
+ casx( s1, s2, d );
+#else
+ cas( s1, s2, d );
+#endif
+ }
+
+ // Functions for isolating 64 bit shifts for LP64
+ inline void sll_ptr( Register s1, Register s2, Register d );
+ inline void sll_ptr( Register s1, int imm6a, Register d );
+ inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
+ inline void srl_ptr( Register s1, Register s2, Register d );
+ inline void srl_ptr( Register s1, int imm6a, Register d );
+
+ // little-endian
+ inline void casl( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
+ inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
+
+ inline void inc( Register d, int const13 = 1 ) { add( d, const13, d); }
+ inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }
+
+ inline void dec( Register d, int const13 = 1 ) { sub( d, const13, d); }
+ inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }
+
+ using Assembler::add;
+ inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype);
+ inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
+ inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
+ inline void add(const Address& a, Register d, int offset = 0);
+
+ using Assembler::andn;
+ inline void andn( Register s1, RegisterOrConstant s2, Register d);
+
+ inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
+ inline void btst( int simm13a, Register s ) { andcc( s, simm13a, G0 ); }
+
+ inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
+ inline void bset( int simm13a, Register s ) { or3( s, simm13a, s ); }
+
+ inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
+ inline void bclr( int simm13a, Register s ) { andn( s, simm13a, s ); }
+
+ inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
+ inline void btog( int simm13a, Register s ) { xor3( s, simm13a, s ); }
+
+ inline void clr( Register d ) { or3( G0, G0, d ); }
+
+ inline void clrb( Register s1, Register s2);
+ inline void clrh( Register s1, Register s2);
+ inline void clr( Register s1, Register s2);
+ inline void clrx( Register s1, Register s2);
+
+ inline void clrb( Register s1, int simm13a);
+ inline void clrh( Register s1, int simm13a);
+ inline void clr( Register s1, int simm13a);
+ inline void clrx( Register s1, int simm13a);
+
+ // copy & clear upper word
+ inline void clruw( Register s, Register d ) { srl( s, G0, d); }
+ // clear upper word
+ inline void clruwu( Register d ) { srl( d, G0, d); }
+
+ using Assembler::ldsb;
+ using Assembler::ldsh;
+ using Assembler::ldsw;
+ using Assembler::ldub;
+ using Assembler::lduh;
+ using Assembler::lduw;
+ using Assembler::ldx;
+ using Assembler::ldd;
+
+#ifdef ASSERT
+ // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+ inline void ld(Register s1, ByteSize simm13a, Register d);
+#endif
+
+ inline void ld(Register s1, Register s2, Register d);
+ inline void ld(Register s1, int simm13a, Register d);
+
+ inline void ldsb(const Address& a, Register d, int offset = 0);
+ inline void ldsh(const Address& a, Register d, int offset = 0);
+ inline void ldsw(const Address& a, Register d, int offset = 0);
+ inline void ldub(const Address& a, Register d, int offset = 0);
+ inline void lduh(const Address& a, Register d, int offset = 0);
+ inline void lduw(const Address& a, Register d, int offset = 0);
+ inline void ldx( const Address& a, Register d, int offset = 0);
+ inline void ld( const Address& a, Register d, int offset = 0);
+ inline void ldd( const Address& a, Register d, int offset = 0);
+
+ inline void ldub(Register s1, RegisterOrConstant s2, Register d );
+ inline void ldsb(Register s1, RegisterOrConstant s2, Register d );
+ inline void lduh(Register s1, RegisterOrConstant s2, Register d );
+ inline void ldsh(Register s1, RegisterOrConstant s2, Register d );
+ inline void lduw(Register s1, RegisterOrConstant s2, Register d );
+ inline void ldsw(Register s1, RegisterOrConstant s2, Register d );
+ inline void ldx( Register s1, RegisterOrConstant s2, Register d );
+ inline void ld( Register s1, RegisterOrConstant s2, Register d );
+ inline void ldd( Register s1, RegisterOrConstant s2, Register d );
+
+ using Assembler::ldf;
+ inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
+ inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
+
+  // membar pseudo instruction. Takes the target memory model into account.
+ inline void membar( Assembler::Membar_mask_bits const7a );
+
+  // returns whether the membar generates anything.
+ inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
+
+ // mov pseudo instructions
+ inline void mov( Register s, Register d) {
+ if ( s != d ) or3( G0, s, d);
+ else assert_not_delayed(); // Put something useful in the delay slot!
+ }
+
+ inline void mov_or_nop( Register s, Register d) {
+ if ( s != d ) or3( G0, s, d);
+ else nop();
+ }
+
+ inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
+
+ using Assembler::prefetch;
+ inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
+
+ using Assembler::stb;
+ using Assembler::sth;
+ using Assembler::stw;
+ using Assembler::stx;
+ using Assembler::std;
+
+#ifdef ASSERT
+ // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+ inline void st(Register d, Register s1, ByteSize simm13a);
+#endif
+
+ inline void st(Register d, Register s1, Register s2);
+ inline void st(Register d, Register s1, int simm13a);
+
+ inline void stb(Register d, const Address& a, int offset = 0 );
+ inline void sth(Register d, const Address& a, int offset = 0 );
+ inline void stw(Register d, const Address& a, int offset = 0 );
+ inline void stx(Register d, const Address& a, int offset = 0 );
+ inline void st( Register d, const Address& a, int offset = 0 );
+ inline void std(Register d, const Address& a, int offset = 0 );
+
+ inline void stb(Register d, Register s1, RegisterOrConstant s2 );
+ inline void sth(Register d, Register s1, RegisterOrConstant s2 );
+ inline void stw(Register d, Register s1, RegisterOrConstant s2 );
+ inline void stx(Register d, Register s1, RegisterOrConstant s2 );
+ inline void std(Register d, Register s1, RegisterOrConstant s2 );
+ inline void st( Register d, Register s1, RegisterOrConstant s2 );
+
+ using Assembler::stf;
+ inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
+ inline void stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
+
+ // Note: offset is added to s2.
+ using Assembler::sub;
+ inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
+
+ using Assembler::swap;
+ inline void swap(Address& a, Register d, int offset = 0);
+
+ // address pseudos: make these names unlike instruction names to avoid confusion
+ inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
+ inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+ inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+ inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
+ inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
+ inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
+ inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
+ inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
+ inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
+
+ // ring buffer traceable jumps
+
+ void jmp2( Register r1, Register r2, const char* file, int line );
+ void jmp ( Register r1, int offset, const char* file, int line );
+
+ void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
+ void jump (const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
+
+
+ // argument pseudos:
+
+ inline void load_argument( Argument& a, Register d );
+ inline void store_argument( Register s, Argument& a );
+ inline void store_ptr_argument( Register s, Argument& a );
+ inline void store_float_argument( FloatRegister s, Argument& a );
+ inline void store_double_argument( FloatRegister s, Argument& a );
+ inline void store_long_argument( Register s, Argument& a );
+
+ // handy macros:
+
+ inline void round_to( Register r, int modulus ) {
+ assert_not_delayed();
+ inc( r, modulus - 1 );
+ and3( r, -modulus, r );
+ }
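+
+  // For example, round_to(r, 8) computes r = (r + 7) & -8, i.e. it rounds the
+  // value in r up to the next multiple of 8.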
+
+ // --------------------------------------------------
+
+ // Functions for isolating 64 bit loads for LP64
+ // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
+ // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
+ inline void ld_ptr(Register s1, Register s2, Register d);
+ inline void ld_ptr(Register s1, int simm13a, Register d);
+ inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
+ inline void ld_ptr(const Address& a, Register d, int offset = 0);
+ inline void st_ptr(Register d, Register s1, Register s2);
+ inline void st_ptr(Register d, Register s1, int simm13a);
+ inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
+ inline void st_ptr(Register d, const Address& a, int offset = 0);
+
+#ifdef ASSERT
+ // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+ inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
+ inline void st_ptr(Register d, Register s1, ByteSize simm13a);
+#endif
+
+ // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
+ // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
+ inline void ld_long(Register s1, Register s2, Register d);
+ inline void ld_long(Register s1, int simm13a, Register d);
+ inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
+ inline void ld_long(const Address& a, Register d, int offset = 0);
+ inline void st_long(Register d, Register s1, Register s2);
+ inline void st_long(Register d, Register s1, int simm13a);
+ inline void st_long(Register d, Register s1, RegisterOrConstant s2);
+ inline void st_long(Register d, const Address& a, int offset = 0);
+
+ // Helpers for address formation.
+ // - They emit only a move if s2 is a constant zero.
+ // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
+ // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
+ RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+ RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+ RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+
+ RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
+ if (is_simm13(src.constant_or_zero()))
+ return src; // register or short constant
+ guarantee(temp != noreg, "constant offset overflow");
+ set(src.as_constant(), temp);
+ return temp;
+ }
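+
+  // For example (illustrative sketch; 'Rbase', 'Rtemp', 'Rdest' and
+  // 'large_offset' are placeholders):
+  //
+  //   RegisterOrConstant off = ensure_simm13_or_reg(large_offset, Rtemp);
+  //   ld_ptr(Rbase, off, Rdest);
+  //
+  // A simm13 constant stays inline; anything larger is first set into Rtemp.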
+
+ // --------------------------------------------------
+
+ public:
+ // traps as per trap.h (SPARC ABI?)
+
+ void breakpoint_trap();
+ void breakpoint_trap(Condition c, CC cc);
+ void flush_windows_trap();
+ void clean_windows_trap();
+ void get_psr_trap();
+ void set_psr_trap();
+
+ // V8/V9 flush_windows
+ void flush_windows();
+
+ // Support for serializing memory accesses between threads
+ void serialize_memory(Register thread, Register tmp1, Register tmp2);
+
+ // Stack frame creation/removal
+ void enter();
+ void leave();
+
+ // V8/V9 integer multiply
+ void mult(Register s1, Register s2, Register d);
+ void mult(Register s1, int simm13a, Register d);
+
+ // V8/V9 read and write of condition codes.
+ void read_ccr(Register d);
+ void write_ccr(Register s);
+
+ // Manipulation of C++ bools
+  // These are idioms to flag the need for care when accessing bools, but on
+  // this platform we assume byte size.
+
+ inline void stbool(Register d, const Address& a) { stb(d, a); }
+ inline void ldbool(const Address& a, Register d) { ldub(a, d); }
+ inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
+
+ // klass oop manipulations if compressed
+ void load_klass(Register src_oop, Register klass);
+ void store_klass(Register klass, Register dst_oop);
+ void store_klass_gap(Register s, Register dst_oop);
+
+ // oop manipulations
+ void load_heap_oop(const Address& s, Register d);
+ void load_heap_oop(Register s1, Register s2, Register d);
+ void load_heap_oop(Register s1, int simm13a, Register d);
+ void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
+ void store_heap_oop(Register d, Register s1, Register s2);
+ void store_heap_oop(Register d, Register s1, int simm13a);
+ void store_heap_oop(Register d, const Address& a, int offset = 0);
+
+ void encode_heap_oop(Register src, Register dst);
+ void encode_heap_oop(Register r) {
+ encode_heap_oop(r, r);
+ }
+ void decode_heap_oop(Register src, Register dst);
+ void decode_heap_oop(Register r) {
+ decode_heap_oop(r, r);
+ }
+ void encode_heap_oop_not_null(Register r);
+ void decode_heap_oop_not_null(Register r);
+ void encode_heap_oop_not_null(Register src, Register dst);
+ void decode_heap_oop_not_null(Register src, Register dst);
+
+ void encode_klass_not_null(Register r);
+ void decode_klass_not_null(Register r);
+ void encode_klass_not_null(Register src, Register dst);
+ void decode_klass_not_null(Register src, Register dst);
+
+  // Support for managing the JavaThread pointer (i.e., the reference to
+ // thread-local information).
+ void get_thread(); // load G2_thread
+ void verify_thread(); // verify G2_thread contents
+  void save_thread   (const Register thread_cache); // save to cache
+ void restore_thread(const Register thread_cache); // restore from cache
+
+ // Support for last Java frame (but use call_VM instead where possible)
+ void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
+ void reset_last_Java_frame(void);
+
+ // Call into the VM.
+ // Passes the thread pointer (in O0) as a prepended argument.
+ // Makes sure oop return values are visible to the GC.
+ void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+
+ // these overloadings are not presently used on SPARC:
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+
+ void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
+ void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
+ void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
+ void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
+
+ void get_vm_result (Register oop_result);
+ void get_vm_result_2(Register metadata_result);
+
+  // vm result is currently getting hijacked for oop preservation
+ void set_vm_result(Register oop_result);
+
+ // Emit the CompiledIC call idiom
+ void ic_call(address entry, bool emit_delay = true);
+
+ // if call_VM_base was called with check_exceptions=false, then call
+ // check_and_forward_exception to handle exceptions when it is safe
+ void check_and_forward_exception(Register scratch_reg);
+
+ private:
+ // For V8
+ void read_ccr_trap(Register ccr_save);
+ void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
+
+#ifdef ASSERT
+ // For V8 debugging. Uses V8 instruction sequence and checks
+  // result with V9 instructions rdccr and wrccr.
+ // Uses Gscatch and Gscatch2
+ void read_ccr_v8_assert(Register ccr_save);
+ void write_ccr_v8_assert(Register ccr_save);
+#endif // ASSERT
+
+ public:
+
+  // Write to card table - register is destroyed afterwards.
+ void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
+
+ void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
+
+#ifndef SERIALGC
+ // General G1 pre-barrier generator.
+ void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
+
+ // General G1 post-barrier generator
+ void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
+#endif // SERIALGC
+
+ // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
+ void push_fTOS();
+
+ // pops double TOS element from CPU stack and pushes on FPU stack
+ void pop_fTOS();
+
+ void empty_FPU_stack();
+
+ void push_IU_state();
+ void pop_IU_state();
+
+ void push_FPU_state();
+ void pop_FPU_state();
+
+ void push_CPU_state();
+ void pop_CPU_state();
+
+ // if heap base register is used - reinit it with the correct value
+ void reinit_heapbase();
+
+ // Debugging
+ void _verify_oop(Register reg, const char * msg, const char * file, int line);
+ void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
+
+ // TODO: verify_method and klass metadata (compare against vptr?)
+ void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
+ void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
+
+#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
+#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
+#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
+#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
+
+ // only if +VerifyFPU
+ void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
+ void stop(const char* msg); // prints msg, dumps registers and stops execution
+ void warn(const char* msg); // prints msg, but doesn't stop
+ void untested(const char* what = "");
+ void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+ void should_not_reach_here() { stop("should not reach here"); }
+ void print_CPU_state();
+
+ // oops in code
+ AddressLiteral allocate_oop_address(jobject obj); // allocate_index
+ AddressLiteral constant_oop_address(jobject obj); // find_index
+ inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
+ inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
+ inline void set_oop (const AddressLiteral& obj_addr, Register d); // same as load_address
+
+ // metadata in code that we have to keep track of
+ AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
+ AddressLiteral constant_metadata_address(Metadata* obj); // find_index
+ inline void set_metadata (Metadata* obj, Register d); // uses allocate_metadata_address
+ inline void set_metadata_constant (Metadata* obj, Register d); // uses constant_metadata_address
+ inline void set_metadata (const AddressLiteral& obj_addr, Register d); // same as load_address
+
+ void set_narrow_oop( jobject obj, Register d );
+ void set_narrow_klass( Klass* k, Register d );
+
+ // nop padding
+ void align(int modulus);
+
+ // declare a safepoint
+ void safepoint();
+
+ // factor out part of stop into subroutine to save space
+ void stop_subroutine();
+ // factor out part of verify_oop into subroutine to save space
+ void verify_oop_subroutine();
+
+ // side-door communication with signalHandler in os_solaris.cpp
+ static address _verify_oop_implicit_branch[3];
+
+ int total_frame_size_in_bytes(int extraWords);
+
+ // used when extraWords known statically
+ void save_frame(int extraWords = 0);
+ void save_frame_c1(int size_in_bytes);
+ // make a frame, and simultaneously pass up one or two register values
+ // into the new register window
+ void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
+
+ // given the number of (outgoing) params, calc the # of words needed on the frame
+ void calc_mem_param_words(Register Rparam_words, Register Rresult);
+
+ // used to calculate frame size dynamically
+ // result is in bytes and must be negated for save inst
+ void calc_frame_size(Register extraWords, Register resultReg);
+
+ // calc and also save
+ void calc_frame_size_and_save(Register extraWords, Register resultReg);
+
+ static void debug(char* msg, RegistersForDebugging* outWindow);
+
+ // implementations of bytecodes used by both interpreter and compiler
+
+ void lcmp( Register Ra_hi, Register Ra_low,
+ Register Rb_hi, Register Rb_low,
+ Register Rresult);
+
+ void lneg( Register Rhi, Register Rlow );
+
+ void lshl( Register Rin_high, Register Rin_low, Register Rcount,
+ Register Rout_high, Register Rout_low, Register Rtemp );
+
+ void lshr( Register Rin_high, Register Rin_low, Register Rcount,
+ Register Rout_high, Register Rout_low, Register Rtemp );
+
+ void lushr( Register Rin_high, Register Rin_low, Register Rcount,
+ Register Rout_high, Register Rout_low, Register Rtemp );
+
+#ifdef _LP64
+ void lcmp( Register Ra, Register Rb, Register Rresult);
+#endif
+
+ // Load and store values by size and signed-ness
+ void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
+ void store_sized_value(Register src, Address dst, size_t size_in_bytes);
+
+ void float_cmp( bool is_float, int unordered_result,
+ FloatRegister Fa, FloatRegister Fb,
+ Register Rresult);
+
+ void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
+ void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
+ void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
+ void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
+
+ void save_all_globals_into_locals();
+ void restore_globals_from_locals();
+
+ void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
+ address lock_addr=0, bool use_call_vm=false);
+ void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
+ address lock_addr=0, bool use_call_vm=false);
+ void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
+
+ // These set the icc condition code to equal if the lock succeeded
+ // and notEqual if it failed and requires a slow case
+ void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
+ Register Rscratch,
+ BiasedLockingCounters* counters = NULL,
+ bool try_bias = UseBiasedLocking);
+ void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
+ Register Rscratch,
+ bool try_bias = UseBiasedLocking);
+
+ // Biased locking support
+ // Upon entry, lock_reg must point to the lock record on the stack,
+ // obj_reg must contain the target object, and mark_reg must contain
+ // the target object's header.
+ // Destroys mark_reg if an attempt is made to bias an anonymously
+ // biased lock. In this case a failure will go either to the slow
+ // case or fall through with the notEqual condition code set with
+ // the expectation that the slow case in the runtime will be called.
+ // In the fall-through case where the CAS-based lock is done,
+ // mark_reg is not destroyed.
+ void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
+ Label& done, Label* slow_case = NULL,
+ BiasedLockingCounters* counters = NULL);
+ // Upon entry, the base register of mark_addr must contain the oop.
+ // Destroys temp_reg.
+
+ // If allow_delay_slot_filling is set to true, the next instruction
+ // emitted after this one will go in an annulled delay slot if the
+ // biased locking exit case failed.
+ void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
+
+ // allocation
+ void eden_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Register t2, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+ void tlab_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+ void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
+ void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
+ Register t1, Register t2);
+
+ // interface method calling
+ void lookup_interface_method(Register recv_klass,
+ Register intf_klass,
+ RegisterOrConstant itable_index,
+ Register method_result,
+ Register temp_reg, Register temp2_reg,
+ Label& no_such_interface);
+
+ // virtual method calling
+ void lookup_virtual_method(Register recv_klass,
+ RegisterOrConstant vtable_index,
+ Register method_result);
+
+ // Test sub_klass against super_klass, with fast and slow paths.
+
+ // The fast path produces a tri-state answer: yes / no / maybe-slow.
+ // One of the three labels can be NULL, meaning take the fall-through.
+ // If super_check_offset is -1, the value is loaded up from super_klass.
+ // No registers are killed, except temp_reg and temp2_reg.
+ // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
+ void check_klass_subtype_fast_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label* L_success,
+ Label* L_failure,
+ Label* L_slow_path,
+ RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
+
+ // The rest of the type check; must be wired to a corresponding fast path.
+ // It does not repeat the fast path logic, so don't use it standalone.
+ // The temp_reg can be noreg, if no temps are available.
+ // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
+ // Updates the sub's secondary super cache as necessary.
+ void check_klass_subtype_slow_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Register temp3_reg,
+ Register temp4_reg,
+ Label* L_success,
+ Label* L_failure);
+
+ // Simplified, combined version, good for typical uses.
+ // Falls through on failure.
+ void check_klass_subtype(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label& L_success);
+
+ // method handles (JSR 292)
+ // offset relative to Gargs of argument at tos[arg_slot].
+ // (arg_slot == 0 means the last argument, not the first).
+ RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
+ Register temp_reg,
+ int extra_slot_offset = 0);
+ // Address of Gargs and argument_offset.
+ Address argument_address(RegisterOrConstant arg_slot,
+ Register temp_reg = noreg,
+ int extra_slot_offset = 0);
+
+ // Stack overflow checking
+
+ // Note: this clobbers G3_scratch
+ void bang_stack_with_offset(int offset) {
+ // stack grows down, caller passes positive offset
+ assert(offset > 0, "must bang with negative offset");
+ set((-offset)+STACK_BIAS, G3_scratch);
+ st(G0, SP, G3_scratch);
+ }
+
+ // Writes to stack successive pages until offset reached to check for
+ // stack overflow + shadow pages. Clobbers tsp and scratch registers.
+ void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
+
+ virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
+
+ void verify_tlab();
+
+ Condition negate_condition(Condition cond);
+
+ // Helper functions for statistics gathering.
+ // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
+ void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
+ // Unconditional increment.
+ void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
+ void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);
+
+ // Compare char[] arrays aligned to 4 bytes.
+ void char_arrays_equals(Register ary1, Register ary2,
+ Register limit, Register result,
+ Register chr1, Register chr2, Label& Ldone);
+ // Use BIS for zeroing
+ void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
+
+#undef VIRTUAL
+};
+
+/**
+ * class SkipIfEqual:
+ *
+ * Instantiating this class will result in assembly code being output that will
+ * jump around any code emitted between the creation of the instance and its
+ * automatic destruction at the end of a scope block, depending on the value of
+ * the flag passed to the constructor, which will be checked at run-time.
+ */
+class SkipIfEqual : public StackObj {
+ private:
+ MacroAssembler* _masm;
+ Label _label;
+
+ public:
+ // 'temp' is a temp register that this object can use (and trash)
+ SkipIfEqual(MacroAssembler*, Register temp,
+ const bool* flag_addr, Assembler::Condition condition);
+ ~SkipIfEqual();
+};
+
+#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
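
A note on the two debugging facilities declared in the new header: verify_oop(reg) is a macro that routes through _verify_oop() with the register name, file and line baked into the message, and SkipIfEqual emits a conditional branch in its constructor and binds the branch target in its destructor, so everything generated inside its scope is skipped at run time depending on the watched flag. The following is a minimal sketch of the usage pattern, not code from this changeset; the __ shorthand, the masm pointer and the choice of DTraceMethodProbes as the watched flag are assumptions made only for the illustration.

  // Sketch only -- assumed context: MacroAssembler* masm and #define __ masm->
  {
    SkipIfEqual skip_if(masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
    // Code emitted inside this scope is branched around at run time when the
    // flag compares equal under the given condition (here: flag == 0).
    __ verify_oop(O0);        // expands to _verify_oop(O0, "broken oop O0", __FILE__, __LINE__)
    // ... emit the probe / slow-path call here ...
  }                           // ~SkipIfEqual binds the skip-target label
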
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
+#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
+
+#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/codeBuffer.hpp"
+#include "code/codeCache.hpp"
+
+inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
+
+
+inline int AddressLiteral::low10() const {
+ return Assembler::low10(value());
+}
+
+
+inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+ jint& stub_inst = *(jint*) branch;
+ stub_inst = patched_branch(target - branch, stub_inst, 0);
+}
+
+#ifndef PRODUCT
+inline void MacroAssembler::pd_print_patched_instruction(address branch) {
+ jint stub_inst = *(jint*) branch;
+ print_instruction(stub_inst);
+ ::tty->print("%s", " (unresolved)");
+}
+#endif // PRODUCT
+
+// Use the right loads/stores for the platform
+inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
+#ifdef _LP64
+ Assembler::ldx(s1, s2, d);
+#else
+ ld( s1, s2, d);
+#endif
+}
+
+inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
+#ifdef _LP64
+ Assembler::ldx(s1, simm13a, d);
+#else
+ ld( s1, simm13a, d);
+#endif
+}
+
+#ifdef ASSERT
+// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
+ ld_ptr(s1, in_bytes(simm13a), d);
+}
+#endif
+
+inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
+#ifdef _LP64
+ ldx(s1, s2, d);
+#else
+ ld( s1, s2, d);
+#endif
+}
+
+inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
+#ifdef _LP64
+ ldx(a, d, offset);
+#else
+ ld( a, d, offset);
+#endif
+}
+
+inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
+#ifdef _LP64
+ Assembler::stx(d, s1, s2);
+#else
+ st( d, s1, s2);
+#endif
+}
+
+inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
+#ifdef _LP64
+ Assembler::stx(d, s1, simm13a);
+#else
+ st( d, s1, simm13a);
+#endif
+}
+
+#ifdef ASSERT
+// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
+ st_ptr(d, s1, in_bytes(simm13a));
+}
+#endif
+
+inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
+#ifdef _LP64
+ stx(d, s1, s2);
+#else
+ st( d, s1, s2);
+#endif
+}
+
+inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
+#ifdef _LP64
+ stx(d, a, offset);
+#else
+ st( d, a, offset);
+#endif
+}
+
+// Use the right loads/stores for the platform
+inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
+#ifdef _LP64
+ Assembler::ldx(s1, s2, d);
+#else
+ Assembler::ldd(s1, s2, d);
+#endif
+}
+
+inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
+#ifdef _LP64
+ Assembler::ldx(s1, simm13a, d);
+#else
+ Assembler::ldd(s1, simm13a, d);
+#endif
+}
+
+inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
+#ifdef _LP64
+ ldx(s1, s2, d);
+#else
+ ldd(s1, s2, d);
+#endif
+}
+
+inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
+#ifdef _LP64
+ ldx(a, d, offset);
+#else
+ ldd(a, d, offset);
+#endif
+}
+
+inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
+#ifdef _LP64
+ Assembler::stx(d, s1, s2);
+#else
+ Assembler::std(d, s1, s2);
+#endif
+}
+
+inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
+#ifdef _LP64
+ Assembler::stx(d, s1, simm13a);
+#else
+ Assembler::std(d, s1, simm13a);
+#endif
+}
+
+inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
+#ifdef _LP64
+ stx(d, s1, s2);
+#else
+ std(d, s1, s2);
+#endif
+}
+
+inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
+#ifdef _LP64
+ stx(d, a, offset);
+#else
+ std(d, a, offset);
+#endif
+}
+
+// Functions for isolating 64 bit shifts for LP64
+
+inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
+#ifdef _LP64
+ Assembler::sllx(s1, s2, d);
+#else
+ Assembler::sll( s1, s2, d);
+#endif
+}
+
+inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
+#ifdef _LP64
+ Assembler::sllx(s1, imm6a, d);
+#else
+ Assembler::sll( s1, imm6a, d);
+#endif
+}
+
+inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
+#ifdef _LP64
+ Assembler::srlx(s1, s2, d);
+#else
+ Assembler::srl( s1, s2, d);
+#endif
+}
+
+inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
+#ifdef _LP64
+ Assembler::srlx(s1, imm6a, d);
+#else
+ Assembler::srl( s1, imm6a, d);
+#endif
+}
+
+inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
+ if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
+ else sll_ptr(s1, s2.as_constant(), d);
+}
+
+// Use the right branch for the platform
+
+inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
+ if (VM_Version::v9_instructions_work())
+ Assembler::bp(c, a, icc, p, d, rt);
+ else
+ Assembler::br(c, a, d, rt);
+}
+
+inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
+ br(c, a, p, target(L));
+}
+
+
+// Branch that tests either xcc or icc depending on the
+// architecture compiled (LP64 or not)
+inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
+#ifdef _LP64
+ Assembler::bp(c, a, xcc, p, d, rt);
+#else
+ MacroAssembler::br(c, a, p, d, rt);
+#endif
+}
+
+inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
+ brx(c, a, p, target(L));
+}
+
+inline void MacroAssembler::ba( Label& L ) {
+ br(always, false, pt, L);
+}
+
+// Warning: V9 only functions
+inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
+ Assembler::bp(c, a, cc, p, d, rt);
+}
+
+inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
+ Assembler::bp(c, a, cc, p, L);
+}
+
+inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
+ if (VM_Version::v9_instructions_work())
+ fbp(c, a, fcc0, p, d, rt);
+ else
+ Assembler::fb(c, a, d, rt);
+}
+
+inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
+ fb(c, a, p, target(L));
+}
+
+inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
+ Assembler::fbp(c, a, cc, p, d, rt);
+}
+
+inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
+ Assembler::fbp(c, a, cc, p, L);
+}
+
+inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
+inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
+
+inline bool MacroAssembler::is_far_target(address d) {
+ if (ForceUnreachable) {
+ // References outside the code cache should be treated as far
+ return d < CodeCache::low_bound() || d > CodeCache::high_bound();
+ }
+ return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
+}
+
+// Call with a check to see if we need to deal with the added
+// expense of relocation and if we overflow the displacement
+// of the quick call instruction.
+inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
+#ifdef _LP64
+ intptr_t disp;
+ // NULL is ok because it will be relocated later.
+ // Must change NULL to a reachable address in order to
+ // pass asserts here and in wdisp.
+ if ( d == NULL )
+ d = pc();
+
+ // Is this address within range of the call instruction?
+ // If not, use the expensive instruction sequence
+ if (is_far_target(d)) {
+ relocate(rt);
+ AddressLiteral dest(d);
+ jumpl_to(dest, O7, O7);
+ } else {
+ Assembler::call(d, rt);
+ }
+#else
+ Assembler::call( d, rt );
+#endif
+}
+
+inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
+ MacroAssembler::call( target(L), rt);
+}
+
+
+
+inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
+inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
+
+// prefetch instruction
+inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
+ if (VM_Version::v9_instructions_work())
+ Assembler::bp( never, true, xcc, pt, d, rt );
+}
+inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
+
+
+// clobbers o7 on V8!!
+// returns the delta from the obtained pc to the address following it
+inline int MacroAssembler::get_pc( Register d ) {
+ int x = offset();
+ if (VM_Version::v9_instructions_work())
+ rdpc(d);
+ else {
+ Label lbl;
+ Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
+ if (d == O7) delayed()->nop();
+ else delayed()->mov(O7, d);
+ bind(lbl);
+ }
+ return offset() - x;
+}
+
+
+// Note: All MacroAssembler::set_foo functions are defined out-of-line.
+
+
+// Loads the current PC of the following instruction as an immediate value in
+// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
+inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
+ intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
+#ifdef _LP64
+ Unimplemented();
+#else
+ Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
+ add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
+#endif
+ return thepc;
+}
+
+
+inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
+ assert_not_delayed();
+ if (ForceUnreachable) {
+ patchable_sethi(addrlit, d);
+ } else {
+ sethi(addrlit, d);
+ }
+ ld(d, addrlit.low10() + offset, d);
+}
+
+
+inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
+ assert_not_delayed();
+ if (ForceUnreachable) {
+ patchable_sethi(addrlit, d);
+ } else {
+ sethi(addrlit, d);
+ }
+ ldub(d, addrlit.low10() + offset, d);
+}
+
+
+inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
+ assert_not_delayed();
+ if (ForceUnreachable) {
+ patchable_sethi(addrlit, d);
+ } else {
+ sethi(addrlit, d);
+ }
+ ld_ptr(d, addrlit.low10() + offset, d);
+}
+
+
+inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
+ assert_not_delayed();
+ if (ForceUnreachable) {
+ patchable_sethi(addrlit, temp);
+ } else {
+ sethi(addrlit, temp);
+ }
+ st(s, temp, addrlit.low10() + offset);
+}
+
+
+inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
+ assert_not_delayed();
+ if (ForceUnreachable) {
+ patchable_sethi(addrlit, temp);
+ } else {
+ sethi(addrlit, temp);
+ }
+ st_ptr(s, temp, addrlit.low10() + offset);
+}
+
+
+// This code sequence is relocatable to any address, even on LP64.
+inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
+ assert_not_delayed();
+ // Force fixed length sethi because NativeJump and NativeFarCall don't handle
+ // variable length instruction streams.
+ patchable_sethi(addrlit, temp);
+ jmpl(temp, addrlit.low10() + offset, d);
+}
+
+
+inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
+ jumpl_to(addrlit, temp, G0, offset);
+}
+
+
+inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
+ int ld_offset, int jmp_offset) {
+ assert_not_delayed();
+ //sethi(al); // sethi is caller responsibility for this one
+ ld_ptr(a, temp, ld_offset);
+ jmp(temp, jmp_offset);
+}
+
+
+inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
+ set_metadata(allocate_metadata_address(obj), d);
+}
+
+inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
+ set_metadata(constant_metadata_address(obj), d);
+}
+
+inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
+ assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
+ set(obj_addr, d);
+}
+
+inline void MacroAssembler::set_oop(jobject obj, Register d) {
+ set_oop(allocate_oop_address(obj), d);
+}
+
+
+inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
+ set_oop(constant_oop_address(obj), d);
+}
+
+
+inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
+ assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
+ set(obj_addr, d);
+}
+
+
+inline void MacroAssembler::load_argument( Argument& a, Register d ) {
+ if (a.is_register())
+ mov(a.as_register(), d);
+ else
+ ld (a.as_address(), d);
+}
+
+inline void MacroAssembler::store_argument( Register s, Argument& a ) {
+ if (a.is_register())
+ mov(s, a.as_register());
+ else
+ st_ptr (s, a.as_address()); // ABI says everything is right justified.
+}
+
+inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
+ if (a.is_register())
+ mov(s, a.as_register());
+ else
+ st_ptr (s, a.as_address());
+}
+
+
+#ifdef _LP64
+inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
+ if (a.is_float_register())
+// The V9 ABI uses F1, F3, F5 to pass float arguments instead of O0, O1, O2
+ fmov(FloatRegisterImpl::S, s, a.as_float_register() );
+ else
+ // Floats are stored in the high half of the stack entry
+ // The low half is undefined per the ABI.
+ stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
+}
+
+inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
+ if (a.is_float_register())
+// The V9 ABI uses D0, D2, D4 to pass double arguments instead of O0, O1, O2
+ fmov(FloatRegisterImpl::D, s, a.as_double_register() );
+ else
+ stf(FloatRegisterImpl::D, s, a.as_address());
+}
+
+inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
+ if (a.is_register())
+ mov(s, a.as_register());
+ else
+ stx(s, a.as_address());
+}
+#endif
+
+inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
+ relocate(rtype);
+ add(s1, simm13a, d);
+}
+inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
+ relocate(rspec);
+ add(s1, simm13a, d);
+}
+
+// form effective addresses this way:
+inline void MacroAssembler::add(const Address& a, Register d, int offset) {
+ if (a.has_index()) add(a.base(), a.index(), d);
+ else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
+ if (offset != 0) add(d, offset, d);
+}
+inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
+ if (s2.is_register()) add(s1, s2.as_register(), d);
+ else { add(s1, s2.as_constant() + offset, d); offset = 0; }
+ if (offset != 0) add(d, offset, d);
+}
+
+inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
+ if (s2.is_register()) andn(s1, s2.as_register(), d);
+ else andn(s1, s2.as_constant(), d);
+}
+
+inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
+inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
+inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); }
+inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }
+
+inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
+inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
+inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); }
+inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
+
+#ifdef _LP64
+// Make all 32 bit loads signed so 64 bit registers maintain proper sign
+inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
+inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
+#else
+inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
+inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
+#endif
+
+#ifdef ASSERT
+ // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+# ifdef _LP64
+inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
+# else
+inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
+# endif
+#endif
+
+inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
+ else { ld( a.base(), a.disp() + offset, d); }
+}
+
+inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
+ else { ldsb(a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
+ else { ldsh(a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
+ else { ldsw(a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
+ else { ldub(a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
+ else { lduh(a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
+ else { lduw(a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
+ else { ldd( a.base(), a.disp() + offset, d); }
+}
+inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
+ else { ldx( a.base(), a.disp() + offset, d); }
+}
+
+inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
+inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
+inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
+inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
+inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
+inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
+inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
+inline void MacroAssembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
+inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
+
+inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
+ if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
+ else ldf(w, s1, s2.as_constant(), d);
+}
+
+inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
+ relocate(a.rspec(offset));
+ ldf(w, a.base(), a.disp() + offset, d);
+}
+
+// returns whether membar generates anything; this code should mirror
+// the membar() implementation below.
+inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
+ if( !os::is_MP() ) return false; // Not needed on single CPU
+ if( VM_Version::v9_instructions_work() ) {
+ const Membar_mask_bits effective_mask =
+ Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+ return (effective_mask != 0);
+ } else {
+ return true;
+ }
+}
+
+inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
+ // Uniprocessors do not need memory barriers
+ if (!os::is_MP()) return;
+ // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
+ // 8.4.4.3, a.31 and a.50.
+ if( VM_Version::v9_instructions_work() ) {
+ // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
+ // of the mmask subfield of const7a that does anything that isn't done
+ // implicitly is StoreLoad.
+ const Membar_mask_bits effective_mask =
+ Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+ if ( effective_mask != 0 ) {
+ Assembler::membar( effective_mask );
+ }
+ } else {
+ // stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
+ // do not issue the stbar because to my knowledge all v8 machines implement TSO,
+ // which guarantees that all stores behave as if an stbar were issued just after
+ // each one of them. On these machines, stbar ought to be a nop. There doesn't
+ // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
+ // it can't be specified by stbar, nor have I come up with a way to simulate it.
+ //
+ // Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
+ // space. Put one here to be on the safe side.
+ Assembler::ldstub(SP, 0, G0);
+ }
+}
+
+inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
+ relocate(a.rspec(offset));
+ assert(!a.has_index(), "");
+ prefetch(a.base(), a.disp() + offset, f);
+}
+
+inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
+inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
+
+#ifdef ASSERT
+// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
+inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
+#endif
+
+inline void MacroAssembler::st(Register d, const Address& a, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
+ else { st( d, a.base(), a.disp() + offset); }
+}
+
+inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
+ else { stb(d, a.base(), a.disp() + offset); }
+}
+inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
+ else { sth(d, a.base(), a.disp() + offset); }
+}
+inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
+ else { stw(d, a.base(), a.disp() + offset); }
+}
+inline void MacroAssembler::std(Register d, const Address& a, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
+ else { std(d, a.base(), a.disp() + offset); }
+}
+inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
+ if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
+ else { stx(d, a.base(), a.disp() + offset); }
+}
+
+inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
+inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
+inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
+inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
+inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
+inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
+
+inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
+ if (s2.is_register()) stf(w, d, s1, s2.as_register());
+ else stf(w, d, s1, s2.as_constant());
+}
+
+inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
+ relocate(a.rspec(offset));
+ if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
+ else { stf(w, d, a.base(), a.disp() + offset); }
+}
+
+inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
+ if (s2.is_register()) sub(s1, s2.as_register(), d);
+ else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
+ if (offset != 0) sub(d, offset, d);
+}
+
+inline void MacroAssembler::swap(Address& a, Register d, int offset) {
+ relocate(a.rspec(offset));
+ if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); }
+ else { swap(a.base(), a.disp() + offset, d); }
+}
+
+#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
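
The membar_has_effect()/membar() pair above encodes the TSO weakening spelled out in its comments: on a V9 SPARC running TSO, only StoreLoad ordering ever needs an explicit membar, so all other mmask bits are masked away before deciding whether to emit anything. A standalone sketch of that decision follows, with made-up bit values rather than HotSpot's real Membar_mask_bits encoding.

  // Illustrative bit values only -- not HotSpot's actual encoding.
  enum MembarBits { LoadLoad = 1, StoreLoad = 2, LoadStore = 4, StoreStore = 8 };

  // Under TSO every ordering except StoreLoad already holds, so only that bit survives.
  static int effective_membar_mask(int mask) {
    return mask & ~(LoadLoad | LoadStore | StoreStore);
  }

  // effective_membar_mask(LoadLoad | StoreStore)  == 0  -> emit no instruction
  // effective_membar_mask(StoreLoad | StoreStore) != 0  -> Assembler::membar(StoreLoad)
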
--- a/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/metaspaceShared_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
@@ -194,11 +194,10 @@
static int inv_simm( int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
static intptr_t inv_wdisp( int x, int nbits ) { return Assembler::inv_wdisp( x, 0, nbits); }
static intptr_t inv_wdisp16( int x ) { return Assembler::inv_wdisp16(x, 0); }
- static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
+ static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
static int patch_branch_destination_offset(int dest_offset, int x) {
- return Assembler::patched_branch(dest_offset, x, 0);
+ return MacroAssembler::patched_branch(dest_offset, x, 0);
}
- void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }
// utility for checking if x is either of 2 small constants
static bool is_either(int x, int k1, int k2) {
@@ -889,7 +888,6 @@
int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
set_long_at(0, patched_instr);
}
- void set_annul() { set_annul_bit(); }
NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
void fill_delay_slot(int instr) { set_long_at(4, instr);}
Assembler::Condition condition() {
--- a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/assembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
--- a/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/runtime_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,8 +24,7 @@
#include "precompiled.hpp"
#ifdef COMPILER2
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
@@ -37,13 +36,8 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,12 +26,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
@@ -496,7 +496,7 @@
const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
const Address size_of_locals (G5_method, Method::size_of_locals_offset());
- const Address max_stack (G5_method, Method::max_stack_offset());
+ const Address constMethod (G5_method, Method::const_offset());
int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
const int extra_space =
@@ -538,7 +538,8 @@
// see if the frame is greater than one page in size. If so,
// then we need to verify there is enough stack space remaining
// Frame_size = (max_stack + extra_space) * BytesPerWord;
- __ lduh( max_stack, Gframe_size );
+ __ ld_ptr( constMethod, Gframe_size );
+ __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );
__ add( Gframe_size, extra_space, Gframe_size );
__ round_to( Gframe_size, WordsPerLong );
__ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
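
The interpreter change just above reflects that max_stack is now read through the ConstMethod rather than directly from the Method, so the generated prologue loads the ConstMethod* first and then reads max_stack from it. Schematically, using the register names from the hunk (the trailing comments are descriptive, not part of the patch):

  __ ld_ptr( constMethod, Gframe_size );                                           // Gframe_size = G5_method->const  (ConstMethod*)
  __ lduh( Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size );  // Gframe_size = constMethod->max_stack
  __ add( Gframe_size, extra_space, Gframe_size );                                 // plus the VM-internal frame words
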
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
--- a/hotspot/src/cpu/sparc/vm/vmreg_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/vmreg_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
#include "code/vmreg.hpp"
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
@@ -1154,7 +1155,7 @@
assert(entry != NULL, "call most probably wrong");
InstructionMark im(this);
emit_byte(0xE8);
- intptr_t disp = entry - (_code_pos + sizeof(int32_t));
+ intptr_t disp = entry - (pc() + sizeof(int32_t));
assert(is_simm32(disp), "must be 32bit offset (call2)");
// Technically, should use call32_operand, but this format is
// implied by the fact that we're emitting a call instruction.
@@ -1167,6 +1168,10 @@
emit_byte(0x99);
}
+void Assembler::cld() {
+ emit_byte(0xfc);
+}
+
void Assembler::cmovl(Condition cc, Register dst, Register src) {
NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
int encode = prefix_and_encode(dst->encoding(), src->encoding());
@@ -1260,6 +1265,11 @@
emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}
+void Assembler::cpuid() {
+ emit_byte(0x0F);
+ emit_byte(0xA2);
+}
+
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
@@ -1417,7 +1427,7 @@
const int short_size = 2;
const int long_size = 6;
- intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
+ intptr_t offs = (intptr_t)dst - (intptr_t)pc();
if (maybe_short && is8bit(offs - short_size)) {
// 0111 tttn #8-bit disp
emit_byte(0x70 | cc);
@@ -1447,14 +1457,14 @@
const int short_size = 2;
address entry = target(L);
#ifdef ASSERT
- intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
+ intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
intptr_t delta = short_branch_delta();
if (delta != 0) {
dist += (dist < 0 ? (-delta) :delta);
}
assert(is8bit(dist), "Dispacement too large for a short jmp");
#endif
- intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
+ intptr_t offs = (intptr_t)entry - (intptr_t)pc();
// 0111 tttn #8-bit disp
emit_byte(0x70 | cc);
emit_byte((offs - short_size) & 0xFF);
@@ -1480,7 +1490,7 @@
InstructionMark im(this);
const int short_size = 2;
const int long_size = 5;
- intptr_t offs = entry - _code_pos;
+ intptr_t offs = entry - pc();
if (maybe_short && is8bit(offs - short_size)) {
emit_byte(0xEB);
emit_byte((offs - short_size) & 0xFF);
@@ -1510,7 +1520,7 @@
InstructionMark im(this);
emit_byte(0xE9);
assert(dest != NULL, "must have a target");
- intptr_t disp = dest - (_code_pos + sizeof(int32_t));
+ intptr_t disp = dest - (pc() + sizeof(int32_t));
assert(is_simm32(disp), "must be 32bit offset (jmp)");
emit_data(disp, rspec.reloc(), call32_operand);
}
@@ -1521,14 +1531,14 @@
address entry = target(L);
assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
- intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
+ intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
intptr_t delta = short_branch_delta();
if (delta != 0) {
dist += (dist < 0 ? (-delta) :delta);
}
assert(is8bit(dist), "Dispacement too large for a short jmp");
#endif
- intptr_t offs = entry - _code_pos;
+ intptr_t offs = entry - pc();
emit_byte(0xEB);
emit_byte((offs - short_size) & 0xFF);
} else {
@@ -1558,6 +1568,12 @@
emit_operand(dst, src);
}
+void Assembler::lfence() {
+ emit_byte(0x0F);
+ emit_byte(0xAE);
+ emit_byte(0xE8);
+}
+
void Assembler::lock() {
emit_byte(0xF0);
}
@@ -2671,6 +2687,10 @@
emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
}
+void Assembler::std() {
+ emit_byte(0xfd);
+}
+
void Assembler::sqrtss(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
@@ -2816,6 +2836,12 @@
emit_byte(0xc0 | encode);
}
+void Assembler::xgetbv() {
+ emit_byte(0x0F);
+ emit_byte(0x01);
+ emit_byte(0xD0);
+}
+
void Assembler::xorl(Register dst, int32_t imm32) {
prefix(dst);
emit_arith(0x81, 0xF0, dst, imm32);
@@ -4361,7 +4387,7 @@
disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
if (!is_simm32(disp)) return false;
- disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
+ disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));
// Because rip relative is a disp + address_of_next_instruction and we
// don't know the value of address_of_next_instruction we apply a fudge factor
@@ -4392,7 +4418,7 @@
relocInfo::relocType rtype,
int format) {
if (rtype == relocInfo::none) {
- emit_long64(data);
+ emit_int64(data);
} else {
emit_data64(data, Relocation::spec_simple(rtype), format);
}
@@ -4410,7 +4436,7 @@
#ifdef ASSERT
check_relocation(rspec, format);
#endif
- emit_long64(data);
+ emit_int64(data);
}
int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
@@ -4943,7 +4969,7 @@
InstructionMark im(this);
int encode = prefixq_and_encode(dst->encoding());
emit_byte(0xB8 | encode);
- emit_long64(imm64);
+ emit_int64(imm64);
}
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
@@ -5417,6043 +5443,3 @@
}
#endif // !LP64
-
-static Assembler::Condition reverse[] = {
- Assembler::noOverflow /* overflow = 0x0 */ ,
- Assembler::overflow /* noOverflow = 0x1 */ ,
- Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
- Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
- Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
- Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
- Assembler::above /* belowEqual = 0x6 */ ,
- Assembler::belowEqual /* above = 0x7 */ ,
- Assembler::positive /* negative = 0x8 */ ,
- Assembler::negative /* positive = 0x9 */ ,
- Assembler::noParity /* parity = 0xa */ ,
- Assembler::parity /* noParity = 0xb */ ,
- Assembler::greaterEqual /* less = 0xc */ ,
- Assembler::less /* greaterEqual = 0xd */ ,
- Assembler::greater /* lessEqual = 0xe */ ,
- Assembler::lessEqual /* greater = 0xf, */
-
-};
-
-
-// Implementation of MacroAssembler
-
-// First all the versions that have distinct versions depending on 32/64 bit
-// Unless the difference is trivial (1 line or so).
-
-#ifndef _LP64
-
-// 32bit versions
-
-Address MacroAssembler::as_Address(AddressLiteral adr) {
- return Address(adr.target(), adr.rspec());
-}
-
-Address MacroAssembler::as_Address(ArrayAddress adr) {
- return Address::make_array(adr);
-}
-
-int MacroAssembler::biased_locking_enter(Register lock_reg,
- Register obj_reg,
- Register swap_reg,
- Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done,
- Label* slow_case,
- BiasedLockingCounters* counters) {
- assert(UseBiasedLocking, "why call this otherwise?");
- assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
- assert_different_registers(lock_reg, obj_reg, swap_reg);
-
- if (PrintBiasedLockingStatistics && counters == NULL)
- counters = BiasedLocking::counters();
-
- bool need_tmp_reg = false;
- if (tmp_reg == noreg) {
- need_tmp_reg = true;
- tmp_reg = lock_reg;
- } else {
- assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
- }
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
- Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
- Address saved_mark_addr(lock_reg, 0);
-
- // Biased locking
- // See whether the lock is currently biased toward our thread and
- // whether the epoch is still valid
- // Note that the runtime guarantees sufficient alignment of JavaThread
- // pointers to allow age to be placed into low bits
- // First check to see whether biasing is even enabled for this object
- Label cas_label;
- int null_check_offset = -1;
- if (!swap_reg_contains_mark) {
- null_check_offset = offset();
- movl(swap_reg, mark_addr);
- }
- if (need_tmp_reg) {
- push(tmp_reg);
- }
- movl(tmp_reg, swap_reg);
- andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
- if (need_tmp_reg) {
- pop(tmp_reg);
- }
- jcc(Assembler::notEqual, cas_label);
- // The bias pattern is present in the object's header. Need to check
- // whether the bias owner and the epoch are both still current.
- // Note that because there is no current thread register on x86 we
- // need to store off the mark word we read out of the object to
- // avoid reloading it and needing to recheck invariants below. This
- // store is unfortunate but it makes the overall code shorter and
- // simpler.
- movl(saved_mark_addr, swap_reg);
- if (need_tmp_reg) {
- push(tmp_reg);
- }
- get_thread(tmp_reg);
- xorl(swap_reg, tmp_reg);
- if (swap_reg_contains_mark) {
- null_check_offset = offset();
- }
- movl(tmp_reg, klass_addr);
- xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
- andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
- if (need_tmp_reg) {
- pop(tmp_reg);
- }
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->biased_lock_entry_count_addr()));
- }
- jcc(Assembler::equal, done);
-
- Label try_revoke_bias;
- Label try_rebias;
-
- // At this point we know that the header has the bias pattern and
- // that we are not the bias owner in the current epoch. We need to
- // figure out more details about the state of the header in order to
- // know what operations can be legally performed on the object's
- // header.
-
- // If the low three bits in the xor result aren't clear, that means
- // the prototype header is no longer biased and we have to revoke
- // the bias on this object.
- testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
- jcc(Assembler::notZero, try_revoke_bias);
-
- // Biasing is still enabled for this data type. See whether the
- // epoch of the current bias is still valid, meaning that the epoch
- // bits of the mark word are equal to the epoch bits of the
- // prototype header. (Note that the prototype header's epoch bits
- // only change at a safepoint.) If not, attempt to rebias the object
- // toward the current thread. Note that we must be absolutely sure
- // that the current epoch is invalid in order to do this because
- // otherwise the manipulations it performs on the mark word are
- // illegal.
- testl(swap_reg, markOopDesc::epoch_mask_in_place);
- jcc(Assembler::notZero, try_rebias);
-
- // The epoch of the current bias is still valid but we know nothing
- // about the owner; it might be set or it might be clear. Try to
- // acquire the bias of the object using an atomic operation. If this
- // fails we will go in to the runtime to revoke the object's bias.
- // Note that we first construct the presumed unbiased header so we
- // don't accidentally blow away another thread's valid bias.
- movl(swap_reg, saved_mark_addr);
- andl(swap_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
- if (need_tmp_reg) {
- push(tmp_reg);
- }
- get_thread(tmp_reg);
- orl(tmp_reg, swap_reg);
- if (os::is_MP()) {
- lock();
- }
- cmpxchgptr(tmp_reg, Address(obj_reg, 0));
- if (need_tmp_reg) {
- pop(tmp_reg);
- }
- // If the biasing toward our thread failed, this means that
- // another thread succeeded in biasing it toward itself and we
- // need to revoke that bias. The revocation will occur in the
- // interpreter runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
- }
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
- }
- jmp(done);
-
- bind(try_rebias);
- // At this point we know the epoch has expired, meaning that the
- // current "bias owner", if any, is actually invalid. Under these
- // circumstances _only_, we are allowed to use the current header's
- // value as the comparison value when doing the cas to acquire the
- // bias in the current epoch. In other words, we allow transfer of
- // the bias from one thread to another directly in this situation.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- if (need_tmp_reg) {
- push(tmp_reg);
- }
- get_thread(tmp_reg);
- movl(swap_reg, klass_addr);
- orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
- movl(swap_reg, saved_mark_addr);
- if (os::is_MP()) {
- lock();
- }
- cmpxchgptr(tmp_reg, Address(obj_reg, 0));
- if (need_tmp_reg) {
- pop(tmp_reg);
- }
- // If the biasing toward our thread failed, then another thread
- // succeeded in biasing it toward itself and we need to revoke that
- // bias. The revocation will occur in the runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
- }
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
- }
- jmp(done);
-
- bind(try_revoke_bias);
- // The prototype mark in the klass doesn't have the bias bit set any
- // more, indicating that objects of this data type are not supposed
- // to be biased any more. We are going to try to reset the mark of
- // this object to the prototype value and fall through to the
- // CAS-based locking scheme. Note that if our CAS fails, it means
- // that another thread raced us for the privilege of revoking the
- // bias of this particular object, so it's okay to continue in the
- // normal locking code.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- movl(swap_reg, saved_mark_addr);
- if (need_tmp_reg) {
- push(tmp_reg);
- }
- movl(tmp_reg, klass_addr);
- movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
- if (os::is_MP()) {
- lock();
- }
- cmpxchgptr(tmp_reg, Address(obj_reg, 0));
- if (need_tmp_reg) {
- pop(tmp_reg);
- }
- // Fall through to the normal CAS-based lock, because no matter what
- // the result of the above CAS, some thread must have succeeded in
- // removing the bias bit from the object's header.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
- }
-
- bind(cas_label);
-
- return null_check_offset;
-}
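
The bit tests spelled out in the comments above reduce to a small decision tree on the mark word. Below is a minimal C++ sketch of that decision tree, not part of the original source; the mask and pattern values are hard-coded here as illustrative assumptions rather than taken from markOopDesc.

#include <cstdint>

// Illustrative masks, assuming the usual mark word layout
// [thread | epoch | age | biased bit | lock bits].
const uintptr_t biased_lock_mask    = 0x7;        // biased bit + 2 lock bits
const uintptr_t biased_lock_pattern = 0x5;        // 101b: biased, unlocked
const uintptr_t age_mask            = 0xfu << 3;  // 4 age bits above the lock bits
const uintptr_t epoch_mask          = 0x3u << 7;  // 2 epoch bits above the age bits

enum BiasCheck { BIASED_TO_US, TRY_REBIAS, TRY_REVOKE, TRY_CAS_ACQUIRE, NOT_BIASED };

BiasCheck classify_mark(uintptr_t mark, uintptr_t prototype, uintptr_t thread) {
  if ((mark & biased_lock_mask) != biased_lock_pattern)
    return NOT_BIASED;                                   // fall through to the CAS lock
  // The xor computed above: any difference vs. (prototype | thread), age ignored.
  uintptr_t diff = (mark ^ (prototype | thread)) & ~age_mask;
  if (diff == 0)               return BIASED_TO_US;      // owner and epoch both current
  if (diff & biased_lock_mask) return TRY_REVOKE;        // class no longer biasable
  if (diff & epoch_mask)       return TRY_REBIAS;        // epoch expired
  return TRY_CAS_ACQUIRE;                                // anonymous or other owner
}
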
-void MacroAssembler::call_VM_leaf_base(address entry_point,
- int number_of_arguments) {
- call(RuntimeAddress(entry_point));
- increment(rsp, number_of_arguments * wordSize);
-}
-
-void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
- cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
- cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::cmpoop(Address src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::cmpoop(Register src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::extend_sign(Register hi, Register lo) {
- // According to Intel Doc. AP-526, "Integer Divide", p.18.
- if (VM_Version::is_P6() && hi == rdx && lo == rax) {
- cdql();
- } else {
- movl(hi, lo);
- sarl(hi, 31);
- }
-}
-
-void MacroAssembler::jC2(Register tmp, Label& L) {
- // set parity bit if FPU flag C2 is set (via rax)
- save_rax(tmp);
- fwait(); fnstsw_ax();
- sahf();
- restore_rax(tmp);
- // branch
- jcc(Assembler::parity, L);
-}
-
-void MacroAssembler::jnC2(Register tmp, Label& L) {
- // set parity bit if FPU flag C2 is set (via rax)
- save_rax(tmp);
- fwait(); fnstsw_ax();
- sahf();
- restore_rax(tmp);
- // branch
- jcc(Assembler::noParity, L);
-}
-
-// 32bit can do a case table jump in one instruction but we no longer allow the base
-// to be installed in the Address class
-void MacroAssembler::jump(ArrayAddress entry) {
- jmp(as_Address(entry));
-}
-
-// Note: y_lo will be destroyed
-void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
- // Long compare for Java (semantics as described in JVM spec.)
- Label high, low, done;
-
- cmpl(x_hi, y_hi);
- jcc(Assembler::less, low);
- jcc(Assembler::greater, high);
- // x_hi is the return register
- xorl(x_hi, x_hi);
- cmpl(x_lo, y_lo);
- jcc(Assembler::below, low);
- jcc(Assembler::equal, done);
-
- bind(high);
- xorl(x_hi, x_hi);
- increment(x_hi);
- jmp(done);
-
- bind(low);
- xorl(x_hi, x_hi);
- decrementl(x_hi);
-
- bind(done);
-}
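
The control flow above implements Java's lcmp on a register pair: a signed compare of the high words followed by an unsigned compare of the low words. A plain C++ sketch of the same result, for reference only:

#include <cstdint>

// Sketch of the lcmp semantics produced by lcmp2int: -1, 0 or 1.
int lcmp2int_sketch(int32_t x_hi, uint32_t x_lo, int32_t y_hi, uint32_t y_lo) {
  if (x_hi < y_hi) return -1;
  if (x_hi > y_hi) return  1;
  if (x_lo < y_lo) return -1;   // low words compare unsigned (jcc below)
  if (x_lo > y_lo) return  1;
  return 0;
}
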
-
-void MacroAssembler::lea(Register dst, AddressLiteral src) {
- mov_literal32(dst, (int32_t)src.target(), src.rspec());
-}
-
-void MacroAssembler::lea(Address dst, AddressLiteral adr) {
- // leal(dst, as_Address(adr));
- // see note in movl as to why we must use a move
- mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
-}
-
-void MacroAssembler::leave() {
- mov(rsp, rbp);
- pop(rbp);
-}
-
-void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
- // Multiplication of two Java long values stored on the stack
- // as illustrated below. Result is in rdx:rax.
- //
- // rsp ---> [ ?? ] \ \
- // .... | y_rsp_offset |
- // [ y_lo ] / (in bytes) | x_rsp_offset
- // [ y_hi ] | (in bytes)
- // .... |
- // [ x_lo ] /
- // [ x_hi ]
- // ....
- //
- // Basic idea: lo(result) = lo(x_lo * y_lo)
- // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
- Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
- Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
- Label quick;
- // load x_hi, y_hi and check if quick
- // multiplication is possible
- movl(rbx, x_hi);
- movl(rcx, y_hi);
- movl(rax, rbx);
- orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0
- jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply
- // do full multiplication
- // 1st step
- mull(y_lo); // x_hi * y_lo
- movl(rbx, rax); // save lo(x_hi * y_lo) in rbx,
- // 2nd step
- movl(rax, x_lo);
- mull(rcx); // x_lo * y_hi
- addl(rbx, rax); // add lo(x_lo * y_hi) to rbx,
- // 3rd step
- bind(quick); // note: rbx, = 0 if quick multiply!
- movl(rax, x_lo);
- mull(y_lo); // x_lo * y_lo
- addl(rdx, rbx); // correct hi(x_lo * y_lo)
-}
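
The comment block above describes the 32x32 decomposition; restated as a standalone C++ sketch (not HotSpot code), it is easy to see why the hi(x_hi * y_hi) term never has to be computed.

#include <cstdint>

// Only the low 64 bits of the product are needed.
uint64_t lmul_sketch(uint64_t x, uint64_t y) {
  uint32_t x_lo = (uint32_t)x, x_hi = (uint32_t)(x >> 32);
  uint32_t y_lo = (uint32_t)y, y_hi = (uint32_t)(y >> 32);
  uint64_t lo_prod = (uint64_t)x_lo * y_lo;        // full 32x32 -> 64 multiply (mull)
  uint32_t hi_corr = x_hi * y_lo + x_lo * y_hi;    // only the low 32 bits matter
  return lo_prod + ((uint64_t)hi_corr << 32);
}
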
-
-void MacroAssembler::lneg(Register hi, Register lo) {
- negl(lo);
- adcl(hi, 0);
- negl(hi);
-}
-
-void MacroAssembler::lshl(Register hi, Register lo) {
- // Java shift left long support (semantics as described in JVM spec., p.305)
- // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
- // shift value is in rcx !
- assert(hi != rcx, "must not use rcx");
- assert(lo != rcx, "must not use rcx");
- const Register s = rcx; // shift count
- const int n = BitsPerWord;
- Label L;
- andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
- cmpl(s, n); // if (s < n)
- jcc(Assembler::less, L); // else (s >= n)
- movl(hi, lo); // x := x << n
- xorl(lo, lo);
- // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
- bind(L); // s (mod n) < n
- shldl(hi, lo); // x := x << s
- shll(lo);
-}
-
-
-void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
- // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
- // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
- assert(hi != rcx, "must not use rcx");
- assert(lo != rcx, "must not use rcx");
- const Register s = rcx; // shift count
- const int n = BitsPerWord;
- Label L;
- andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
- cmpl(s, n); // if (s < n)
- jcc(Assembler::less, L); // else (s >= n)
- movl(lo, hi); // x := x >> n
- if (sign_extension) sarl(hi, 31);
- else xorl(hi, hi);
- // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
- bind(L); // s (mod n) < n
- shrdl(lo, hi); // x := x >> s
- if (sign_extension) sarl(hi);
- else shrl(hi);
-}
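
Both shift stubs rely on the identity quoted in their comments (x << s == (x << n) << (s - n) for s >= n). A C++ rendering on a hi:lo pair, assuming 32-bit words and arithmetic right shift of signed values, purely for illustration:

#include <cstdint>

void lshl_sketch(uint32_t& hi, uint32_t& lo, unsigned s) {
  s &= 0x3f;                                     // Java takes the count mod 64
  if (s >= 32) { hi = lo; lo = 0; s -= 32; }     // move a whole word first
  if (s) { hi = (hi << s) | (lo >> (32 - s)); lo <<= s; }   // shld / shl
}

void lshr_sketch(int32_t& hi, uint32_t& lo, unsigned s, bool sign_extension) {
  s &= 0x3f;
  if (s >= 32) { lo = (uint32_t)hi; hi = sign_extension ? (hi >> 31) : 0; s -= 32; }
  if (s) {
    lo = (lo >> s) | ((uint32_t)hi << (32 - s));            // shrd
    hi = sign_extension ? (hi >> s) : (int32_t)((uint32_t)hi >> s);   // sar / shr
  }
}
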
-
-void MacroAssembler::movoop(Register dst, jobject obj) {
- mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::movoop(Address dst, jobject obj) {
- mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
- mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
- mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
- if (src.is_lval()) {
- mov_literal32(dst, (intptr_t)src.target(), src.rspec());
- } else {
- movl(dst, as_Address(src));
- }
-}
-
-void MacroAssembler::movptr(ArrayAddress dst, Register src) {
- movl(as_Address(dst), src);
-}
-
-void MacroAssembler::movptr(Register dst, ArrayAddress src) {
- movl(dst, as_Address(src));
-}
-
-// src should NEVER be a real pointer. Use AddressLiteral for true pointers
-void MacroAssembler::movptr(Address dst, intptr_t src) {
- movl(dst, src);
-}
-
-
-void MacroAssembler::pop_callee_saved_registers() {
- pop(rcx);
- pop(rdx);
- pop(rdi);
- pop(rsi);
-}
-
-void MacroAssembler::pop_fTOS() {
- fld_d(Address(rsp, 0));
- addl(rsp, 2 * wordSize);
-}
-
-void MacroAssembler::push_callee_saved_registers() {
- push(rsi);
- push(rdi);
- push(rdx);
- push(rcx);
-}
-
-void MacroAssembler::push_fTOS() {
- subl(rsp, 2 * wordSize);
- fstp_d(Address(rsp, 0));
-}
-
-
-void MacroAssembler::pushoop(jobject obj) {
- push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::pushklass(Metadata* obj) {
- push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::pushptr(AddressLiteral src) {
- if (src.is_lval()) {
- push_literal32((int32_t)src.target(), src.rspec());
- } else {
- pushl(as_Address(src));
- }
-}
-
-void MacroAssembler::set_word_if_not_zero(Register dst) {
- xorl(dst, dst);
- set_byte_if_not_zero(dst);
-}
-
-static void pass_arg0(MacroAssembler* masm, Register arg) {
- masm->push(arg);
-}
-
-static void pass_arg1(MacroAssembler* masm, Register arg) {
- masm->push(arg);
-}
-
-static void pass_arg2(MacroAssembler* masm, Register arg) {
- masm->push(arg);
-}
-
-static void pass_arg3(MacroAssembler* masm, Register arg) {
- masm->push(arg);
-}
-
-#ifndef PRODUCT
-extern "C" void findpc(intptr_t x);
-#endif
-
-void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
- // In order to get locks to work, we need to fake an in_VM state

- JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
- thread->set_thread_state(_thread_in_vm);
- if (ShowMessageBoxOnError) {
- JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
- thread->set_thread_state(_thread_in_vm);
- if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
- ttyLocker ttyl;
- BytecodeCounter::print();
- }
- // To see where a verify_oop failed, get $ebx+40/X for this frame.
- // This is the value of eip which points to where verify_oop will return.
- if (os::message_box(msg, "Execution stopped, print registers?")) {
- print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
- BREAKPOINT;
- }
- } else {
- ttyLocker ttyl;
- ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
- }
- // Don't assert holding the ttyLock
- assert(false, err_msg("DEBUG MESSAGE: %s", msg));
- ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
-}
-
-void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
- ttyLocker ttyl;
- FlagSetting fs(Debugging, true);
- tty->print_cr("eip = 0x%08x", eip);
-#ifndef PRODUCT
- if ((WizardMode || Verbose) && PrintMiscellaneous) {
- tty->cr();
- findpc(eip);
- tty->cr();
- }
-#endif
-#define PRINT_REG(rax) \
- { tty->print("%s = ", #rax); os::print_location(tty, rax); }
- PRINT_REG(rax);
- PRINT_REG(rbx);
- PRINT_REG(rcx);
- PRINT_REG(rdx);
- PRINT_REG(rdi);
- PRINT_REG(rsi);
- PRINT_REG(rbp);
- PRINT_REG(rsp);
-#undef PRINT_REG
- // Print some words near the top of the stack.
- int* dump_sp = (int*) rsp;
- for (int col1 = 0; col1 < 8; col1++) {
- tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
- os::print_location(tty, *dump_sp++);
- }
- for (int row = 0; row < 16; row++) {
- tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
- for (int col = 0; col < 8; col++) {
- tty->print(" 0x%08x", *dump_sp++);
- }
- tty->cr();
- }
- // Print some instructions around pc:
- Disassembler::decode((address)eip-64, (address)eip);
- tty->print_cr("--------");
- Disassembler::decode((address)eip, (address)eip+32);
-}
-
-void MacroAssembler::stop(const char* msg) {
- ExternalAddress message((address)msg);
- // push address of message
- pushptr(message.addr());
- { Label L; call(L, relocInfo::none); bind(L); } // push eip
- pusha(); // push registers
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
- hlt();
-}
-
-void MacroAssembler::warn(const char* msg) {
- push_CPU_state();
-
- ExternalAddress message((address) msg);
- // push address of message
- pushptr(message.addr());
-
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
- addl(rsp, wordSize); // discard argument
- pop_CPU_state();
-}
-
-void MacroAssembler::print_state() {
- { Label L; call(L, relocInfo::none); bind(L); } // push eip
- pusha(); // push registers
-
- push_CPU_state();
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
- pop_CPU_state();
-
- popa();
- addl(rsp, wordSize);
-}
-
-#else // _LP64
-
-// 64 bit versions
-
-Address MacroAssembler::as_Address(AddressLiteral adr) {
- // amd64 always does this as a pc-rel
- // we can be absolute or disp based on the instruction type
- // jmp/call are displacements others are absolute
- assert(!adr.is_lval(), "must be rval");
- assert(reachable(adr), "must be");
- return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
-
-}
-
-Address MacroAssembler::as_Address(ArrayAddress adr) {
- AddressLiteral base = adr.base();
- lea(rscratch1, base);
- Address index = adr.index();
- assert(index._disp == 0, "must not have disp"); // maybe it can?
- Address array(rscratch1, index._index, index._scale, index._disp);
- return array;
-}
-
-int MacroAssembler::biased_locking_enter(Register lock_reg,
- Register obj_reg,
- Register swap_reg,
- Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done,
- Label* slow_case,
- BiasedLockingCounters* counters) {
- assert(UseBiasedLocking, "why call this otherwise?");
- assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
- assert(tmp_reg != noreg, "tmp_reg must be supplied");
- assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
- Address saved_mark_addr(lock_reg, 0);
-
- if (PrintBiasedLockingStatistics && counters == NULL)
- counters = BiasedLocking::counters();
-
- // Biased locking
- // See whether the lock is currently biased toward our thread and
- // whether the epoch is still valid
- // Note that the runtime guarantees sufficient alignment of JavaThread
- // pointers to allow age to be placed into low bits
- // First check to see whether biasing is even enabled for this object
- Label cas_label;
- int null_check_offset = -1;
- if (!swap_reg_contains_mark) {
- null_check_offset = offset();
- movq(swap_reg, mark_addr);
- }
- movq(tmp_reg, swap_reg);
- andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
- jcc(Assembler::notEqual, cas_label);
- // The bias pattern is present in the object's header. Need to check
- // whether the bias owner and the epoch are both still current.
- load_prototype_header(tmp_reg, obj_reg);
- orq(tmp_reg, r15_thread);
- xorq(tmp_reg, swap_reg);
- andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
- }
- jcc(Assembler::equal, done);
-
- Label try_revoke_bias;
- Label try_rebias;
-
- // At this point we know that the header has the bias pattern and
- // that we are not the bias owner in the current epoch. We need to
- // figure out more details about the state of the header in order to
- // know what operations can be legally performed on the object's
- // header.
-
- // If the low three bits in the xor result aren't clear, that means
- // the prototype header is no longer biased and we have to revoke
- // the bias on this object.
- testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- jcc(Assembler::notZero, try_revoke_bias);
-
- // Biasing is still enabled for this data type. See whether the
- // epoch of the current bias is still valid, meaning that the epoch
- // bits of the mark word are equal to the epoch bits of the
- // prototype header. (Note that the prototype header's epoch bits
- // only change at a safepoint.) If not, attempt to rebias the object
- // toward the current thread. Note that we must be absolutely sure
- // that the current epoch is invalid in order to do this because
- // otherwise the manipulations it performs on the mark word are
- // illegal.
- testq(tmp_reg, markOopDesc::epoch_mask_in_place);
- jcc(Assembler::notZero, try_rebias);
-
- // The epoch of the current bias is still valid but we know nothing
- // about the owner; it might be set or it might be clear. Try to
- // acquire the bias of the object using an atomic operation. If this
- // fails we will go in to the runtime to revoke the object's bias.
- // Note that we first construct the presumed unbiased header so we
- // don't accidentally blow away another thread's valid bias.
- andq(swap_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
- movq(tmp_reg, swap_reg);
- orq(tmp_reg, r15_thread);
- if (os::is_MP()) {
- lock();
- }
- cmpxchgq(tmp_reg, Address(obj_reg, 0));
- // If the biasing toward our thread failed, this means that
- // another thread succeeded in biasing it toward itself and we
- // need to revoke that bias. The revocation will occur in the
- // interpreter runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
- }
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
- }
- jmp(done);
-
- bind(try_rebias);
- // At this point we know the epoch has expired, meaning that the
- // current "bias owner", if any, is actually invalid. Under these
- // circumstances _only_, we are allowed to use the current header's
- // value as the comparison value when doing the cas to acquire the
- // bias in the current epoch. In other words, we allow transfer of
- // the bias from one thread to another directly in this situation.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- load_prototype_header(tmp_reg, obj_reg);
- orq(tmp_reg, r15_thread);
- if (os::is_MP()) {
- lock();
- }
- cmpxchgq(tmp_reg, Address(obj_reg, 0));
- // If the biasing toward our thread failed, then another thread
- // succeeded in biasing it toward itself and we need to revoke that
- // bias. The revocation will occur in the runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
- }
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
- }
- jmp(done);
-
- bind(try_revoke_bias);
- // The prototype mark in the klass doesn't have the bias bit set any
- // more, indicating that objects of this data type are not supposed
- // to be biased any more. We are going to try to reset the mark of
- // this object to the prototype value and fall through to the
- // CAS-based locking scheme. Note that if our CAS fails, it means
- // that another thread raced us for the privilege of revoking the
- // bias of this particular object, so it's okay to continue in the
- // normal locking code.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- load_prototype_header(tmp_reg, obj_reg);
- if (os::is_MP()) {
- lock();
- }
- cmpxchgq(tmp_reg, Address(obj_reg, 0));
- // Fall through to the normal CAS-based lock, because no matter what
- // the result of the above CAS, some thread must have succeeded in
- // removing the bias bit from the object's header.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
- }
-
- bind(cas_label);
-
- return null_check_offset;
-}
-
-void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
- Label L, E;
-
-#ifdef _WIN64
- // Windows always allocates space for its register args
- assert(num_args <= 4, "only register arguments supported");
- subq(rsp, frame::arg_reg_save_area_bytes);
-#endif
-
- // Align stack if necessary
- testl(rsp, 15);
- jcc(Assembler::zero, L);
-
- subq(rsp, 8);
- {
- call(RuntimeAddress(entry_point));
- }
- addq(rsp, 8);
- jmp(E);
-
- bind(L);
- {
- call(RuntimeAddress(entry_point));
- }
-
- bind(E);
-
-#ifdef _WIN64
- // restore stack pointer
- addq(rsp, frame::arg_reg_save_area_bytes);
-#endif
-
-}
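
The stack juggling above only guarantees that rsp is 16-byte aligned at the call instruction, as the ABI requires. A one-line sketch of the decision, written out here purely for illustration:

#include <cstdint>

// If rsp is not 16-byte aligned (testl(rsp, 15)), drop one extra 8-byte slot
// before calling; otherwise call directly.
uintptr_t sp_at_call_sketch(uintptr_t rsp) {
  return (rsp & 15) ? rsp - 8 : rsp;
}
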
-
-void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
- assert(!src2.is_lval(), "should use cmpptr");
-
- if (reachable(src2)) {
- cmpq(src1, as_Address(src2));
- } else {
- lea(rscratch1, src2);
- Assembler::cmpq(src1, Address(rscratch1, 0));
- }
-}
-
-int MacroAssembler::corrected_idivq(Register reg) {
- // Full implementation of Java ldiv and lrem; checks for special
- // case as described in JVM spec., p.243 & p.271. The function
- // returns the (pc) offset of the idivl instruction - may be needed
- // for implicit exceptions.
- //
- // normal case special case
- //
- // input : rax: dividend min_long
- // reg: divisor (may not be eax/edx) -1
- //
- // output: rax: quotient (= rax idiv reg) min_long
- // rdx: remainder (= rax irem reg) 0
- assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
- static const int64_t min_long = 0x8000000000000000;
- Label normal_case, special_case;
-
- // check for special case
- cmp64(rax, ExternalAddress((address) &min_long));
- jcc(Assembler::notEqual, normal_case);
- xorl(rdx, rdx); // prepare rdx for possible special case (where
- // remainder = 0)
- cmpq(reg, -1);
- jcc(Assembler::equal, special_case);
-
- // handle normal case
- bind(normal_case);
- cdqq();
- int idivq_offset = offset();
- idivq(reg);
-
- // normal and special case exit
- bind(special_case);
-
- return idivq_offset;
-}
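
The special case handled above exists because the hardware idiv of min_long by -1 traps, while Java defines the result. A C++ sketch (not HotSpot code) of the required behaviour:

#include <cstdint>

// Java: LONG_MIN / -1 == LONG_MIN, LONG_MIN % -1 == 0; the hardware would fault.
void ldiv_lrem_sketch(int64_t dividend, int64_t divisor, int64_t& quot, int64_t& rem) {
  const int64_t min_long = INT64_MIN;
  if (dividend == min_long && divisor == -1) {
    quot = min_long;   // quotient left unchanged, as in the stub
    rem  = 0;          // remainder forced to zero
  } else {
    quot = dividend / divisor;
    rem  = dividend % divisor;
  }
}
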
-
-void MacroAssembler::decrementq(Register reg, int value) {
- if (value == min_jint) { subq(reg, value); return; }
- if (value < 0) { incrementq(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decq(reg) ; return; }
- /* else */ { subq(reg, value) ; return; }
-}
-
-void MacroAssembler::decrementq(Address dst, int value) {
- if (value == min_jint) { subq(dst, value); return; }
- if (value < 0) { incrementq(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decq(dst) ; return; }
- /* else */ { subq(dst, value) ; return; }
-}
-
-void MacroAssembler::incrementq(Register reg, int value) {
- if (value == min_jint) { addq(reg, value); return; }
- if (value < 0) { decrementq(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incq(reg) ; return; }
- /* else */ { addq(reg, value) ; return; }
-}
-
-void MacroAssembler::incrementq(Address dst, int value) {
- if (value == min_jint) { addq(dst, value); return; }
- if (value < 0) { decrementq(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incq(dst) ; return; }
- /* else */ { addq(dst, value) ; return; }
-}
-
-// 32bit can do a case table jump in one instruction but we no longer allow the base
-// to be installed in the Address class
-void MacroAssembler::jump(ArrayAddress entry) {
- lea(rscratch1, entry.base());
- Address dispatch = entry.index();
- assert(dispatch._base == noreg, "must be");
- dispatch._base = rscratch1;
- jmp(dispatch);
-}
-
-void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
- ShouldNotReachHere(); // 64bit doesn't use two regs
- cmpq(x_lo, y_lo);
-}
-
-void MacroAssembler::lea(Register dst, AddressLiteral src) {
- mov_literal64(dst, (intptr_t)src.target(), src.rspec());
-}
-
-void MacroAssembler::lea(Address dst, AddressLiteral adr) {
- mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
- movptr(dst, rscratch1);
-}
-
-void MacroAssembler::leave() {
- // %%% is this really better? Why not on 32bit too?
- emit_byte(0xC9); // LEAVE
-}
-
-void MacroAssembler::lneg(Register hi, Register lo) {
- ShouldNotReachHere(); // 64bit doesn't use two regs
- negq(lo);
-}
-
-void MacroAssembler::movoop(Register dst, jobject obj) {
- mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::movoop(Address dst, jobject obj) {
- mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
- movq(dst, rscratch1);
-}
-
-void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
- mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
- mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
- movq(dst, rscratch1);
-}
-
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
- if (src.is_lval()) {
- mov_literal64(dst, (intptr_t)src.target(), src.rspec());
- } else {
- if (reachable(src)) {
- movq(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movq(dst, Address(rscratch1,0));
- }
- }
-}
-
-void MacroAssembler::movptr(ArrayAddress dst, Register src) {
- movq(as_Address(dst), src);
-}
-
-void MacroAssembler::movptr(Register dst, ArrayAddress src) {
- movq(dst, as_Address(src));
-}
-
-// src should NEVER be a real pointer. Use AddressLiteral for true pointers
-void MacroAssembler::movptr(Address dst, intptr_t src) {
- mov64(rscratch1, src);
- movq(dst, rscratch1);
-}
-
-// These are mostly for initializing NULL
-void MacroAssembler::movptr(Address dst, int32_t src) {
- movslq(dst, src);
-}
-
-void MacroAssembler::movptr(Register dst, int32_t src) {
- mov64(dst, (intptr_t)src);
-}
-
-void MacroAssembler::pushoop(jobject obj) {
- movoop(rscratch1, obj);
- push(rscratch1);
-}
-
-void MacroAssembler::pushklass(Metadata* obj) {
- mov_metadata(rscratch1, obj);
- push(rscratch1);
-}
-
-void MacroAssembler::pushptr(AddressLiteral src) {
- lea(rscratch1, src);
- if (src.is_lval()) {
- push(rscratch1);
- } else {
- pushq(Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::reset_last_Java_frame(bool clear_fp,
- bool clear_pc) {
- // we must set sp to zero to clear frame
- movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
- // must clear fp, so that compiled frames are not confused; it is
- // possible that we need it only for debugging
- if (clear_fp) {
- movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
- }
-
- if (clear_pc) {
- movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
- }
-}
-
-void MacroAssembler::set_last_Java_frame(Register last_java_sp,
- Register last_java_fp,
- address last_java_pc) {
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = rsp;
- }
-
- // last_java_fp is optional
- if (last_java_fp->is_valid()) {
- movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
- last_java_fp);
- }
-
- // last_java_pc is optional
- if (last_java_pc != NULL) {
- Address java_pc(r15_thread,
- JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
- lea(rscratch1, InternalAddress(last_java_pc));
- movptr(java_pc, rscratch1);
- }
-
- movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
-}
-
-static void pass_arg0(MacroAssembler* masm, Register arg) {
- if (c_rarg0 != arg ) {
- masm->mov(c_rarg0, arg);
- }
-}
-
-static void pass_arg1(MacroAssembler* masm, Register arg) {
- if (c_rarg1 != arg ) {
- masm->mov(c_rarg1, arg);
- }
-}
-
-static void pass_arg2(MacroAssembler* masm, Register arg) {
- if (c_rarg2 != arg ) {
- masm->mov(c_rarg2, arg);
- }
-}
-
-static void pass_arg3(MacroAssembler* masm, Register arg) {
- if (c_rarg3 != arg ) {
- masm->mov(c_rarg3, arg);
- }
-}
-
-void MacroAssembler::stop(const char* msg) {
- address rip = pc();
- pusha(); // get regs on stack
- lea(c_rarg0, ExternalAddress((address) msg));
- lea(c_rarg1, InternalAddress(rip));
- movq(c_rarg2, rsp); // pass pointer to regs array
- andq(rsp, -16); // align stack as required by ABI
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
- hlt();
-}
-
-void MacroAssembler::warn(const char* msg) {
- push(rbp);
- movq(rbp, rsp);
- andq(rsp, -16); // align stack as required by push_CPU_state and call
- push_CPU_state(); // keeps alignment at 16 bytes
- lea(c_rarg0, ExternalAddress((address) msg));
- call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
- pop_CPU_state();
- mov(rsp, rbp);
- pop(rbp);
-}
-
-void MacroAssembler::print_state() {
- address rip = pc();
- pusha(); // get regs on stack
- push(rbp);
- movq(rbp, rsp);
- andq(rsp, -16); // align stack as required by push_CPU_state and call
- push_CPU_state(); // keeps alignment at 16 bytes
-
- lea(c_rarg0, InternalAddress(rip));
- lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
- call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
-
- pop_CPU_state();
- mov(rsp, rbp);
- pop(rbp);
- popa();
-}
-
-#ifndef PRODUCT
-extern "C" void findpc(intptr_t x);
-#endif
-
-void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
- // In order to get locks to work, we need to fake an in_VM state
- if (ShowMessageBoxOnError) {
- JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
- thread->set_thread_state(_thread_in_vm);
-#ifndef PRODUCT
- if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
- ttyLocker ttyl;
- BytecodeCounter::print();
- }
-#endif
- // To see where a verify_oop failed, get $ebx+40/X for this frame.
- // XXX correct this offset for amd64
- // This is the value of eip which points to where verify_oop will return.
- if (os::message_box(msg, "Execution stopped, print registers?")) {
- print_state64(pc, regs);
- BREAKPOINT;
- assert(false, "start up GDB");
- }
- ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
- } else {
- ttyLocker ttyl;
- ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
- msg);
- assert(false, err_msg("DEBUG MESSAGE: %s", msg));
- }
-}
-
-void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
- ttyLocker ttyl;
- FlagSetting fs(Debugging, true);
- tty->print_cr("rip = 0x%016lx", pc);
-#ifndef PRODUCT
- tty->cr();
- findpc(pc);
- tty->cr();
-#endif
-#define PRINT_REG(rax, value) \
- { tty->print("%s = ", #rax); os::print_location(tty, value); }
- PRINT_REG(rax, regs[15]);
- PRINT_REG(rbx, regs[12]);
- PRINT_REG(rcx, regs[14]);
- PRINT_REG(rdx, regs[13]);
- PRINT_REG(rdi, regs[8]);
- PRINT_REG(rsi, regs[9]);
- PRINT_REG(rbp, regs[10]);
- PRINT_REG(rsp, regs[11]);
- PRINT_REG(r8 , regs[7]);
- PRINT_REG(r9 , regs[6]);
- PRINT_REG(r10, regs[5]);
- PRINT_REG(r11, regs[4]);
- PRINT_REG(r12, regs[3]);
- PRINT_REG(r13, regs[2]);
- PRINT_REG(r14, regs[1]);
- PRINT_REG(r15, regs[0]);
-#undef PRINT_REG
- // Print some words near the top of the stack.
- int64_t* rsp = (int64_t*) regs[11];
- int64_t* dump_sp = rsp;
- for (int col1 = 0; col1 < 8; col1++) {
- tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
- os::print_location(tty, *dump_sp++);
- }
- for (int row = 0; row < 25; row++) {
- tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
- for (int col = 0; col < 4; col++) {
- tty->print(" 0x%016lx", *dump_sp++);
- }
- tty->cr();
- }
- // Print some instructions around pc:
- Disassembler::decode((address)pc-64, (address)pc);
- tty->print_cr("--------");
- Disassembler::decode((address)pc, (address)pc+32);
-}
-
-#endif // _LP64
-
-// Now versions that are common to 32/64 bit
-
-void MacroAssembler::addptr(Register dst, int32_t imm32) {
- LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
-}
-
-void MacroAssembler::addptr(Register dst, Register src) {
- LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
-}
-
-void MacroAssembler::addptr(Address dst, Register src) {
- LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
-}
-
-void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::addsd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::addsd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- addss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- addss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::align(int modulus) {
- if (offset() % modulus != 0) {
- nop(modulus - (offset() % modulus));
- }
-}
-
-void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
- // Used in sign-masking with aligned address.
- assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
- if (reachable(src)) {
- Assembler::andpd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::andpd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
- // Used in sign-masking with aligned address.
- assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
- if (reachable(src)) {
- Assembler::andps(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::andps(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::andptr(Register dst, int32_t imm32) {
- LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
-}
-
-void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
- pushf();
- if (os::is_MP())
- lock();
- incrementl(counter_addr);
- popf();
-}
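
atomic_incl amounts to a lock-prefixed increment of a counter in memory. Expressed with std::atomic instead of generated code, as a sketch only:

#include <atomic>
#include <cstdint>

// Effect of atomic_incl on a 32-bit counter: "lock; incl [counter]" on MP systems.
void atomic_incl_sketch(std::atomic<int32_t>& counter) {
  counter.fetch_add(1);
}
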
-
- // Writes to successive stack pages until the given offset is reached, to check
- // for stack overflow + shadow pages. This clobbers tmp.
-void MacroAssembler::bang_stack_size(Register size, Register tmp) {
- movptr(tmp, rsp);
- // Bang stack for total size given plus shadow page size.
- // Bang one page at a time because a large size can bang beyond the yellow and
- // red zones.
- Label loop;
- bind(loop);
- movl(Address(tmp, (-os::vm_page_size())), size );
- subptr(tmp, os::vm_page_size());
- subl(size, os::vm_page_size());
- jcc(Assembler::greater, loop);
-
- // Bang down shadow pages too.
- // The -1 because we already subtracted 1 page.
- for (int i = 0; i< StackShadowPages-1; i++) {
- // this could be any sized move, but it can serve as a debugging crumb,
- // so the bigger the better.
- movptr(Address(tmp, (-i*os::vm_page_size())), size );
- }
-}
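
The probe pattern generated above can be traced with a small sketch. The page size and shadow-page count below are hypothetical stand-ins for os::vm_page_size() and StackShadowPages, chosen only for illustration:

#include <cstdio>

void bang_stack_size_sketch(long size) {
  const long page = 4096, shadow_pages = 20;   // assumed values, not HotSpot's
  long sp_offset = 0;
  do {                                         // cover the requested size, one page at a time
    sp_offset += page;
    printf("touch word at [sp - %ld]\n", sp_offset);
    size -= page;
  } while (size > 0);
  for (long i = 0; i < shadow_pages - 1; i++)  // then the shadow pages (one already done)
    printf("touch word at [sp - %ld]\n", sp_offset + i * page);
}
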
-
-void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
- assert(UseBiasedLocking, "why call this otherwise?");
-
- // Check for biased locking unlock case, which is a no-op
- // Note: we do not have to check the thread ID for two reasons.
- // First, the interpreter checks for IllegalMonitorStateException at
- // a higher level. Second, if the bias was revoked while we held the
- // lock, the object could not be rebiased toward another thread, so
- // the bias bit would be clear.
- movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
- jcc(Assembler::equal, done);
-}
-
-void MacroAssembler::c2bool(Register x) {
- // implements x == 0 ? 0 : 1
- // note: must only look at the least-significant byte of x
- // since C-style booleans are stored in one byte
- // only! (was bug)
- andl(x, 0xFF);
- setb(Assembler::notZero, x);
-}
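
c2bool normalizes a C-style boolean while deliberately looking only at the low byte. The equivalent scalar operation, as a sketch:

#include <cstdint>

// x becomes 1 if its low byte is non-zero, else 0 (andl(x, 0xFF); setb(notZero, x)).
uint32_t c2bool_sketch(uint32_t x) {
  return (x & 0xFFu) != 0 ? 1u : 0u;
}
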
-
-// Wouldn't need if AddressLiteral version had new name
-void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
- Assembler::call(L, rtype);
-}
-
-void MacroAssembler::call(Register entry) {
- Assembler::call(entry);
-}
-
-void MacroAssembler::call(AddressLiteral entry) {
- if (reachable(entry)) {
- Assembler::call_literal(entry.target(), entry.rspec());
- } else {
- lea(rscratch1, entry);
- Assembler::call(rscratch1);
- }
-}
-
-void MacroAssembler::ic_call(address entry) {
- RelocationHolder rh = virtual_call_Relocation::spec(pc());
- movptr(rax, (intptr_t)Universe::non_oop_word());
- call(AddressLiteral(entry, rh));
-}
-
-// Implementation of call_VM versions
-
-void MacroAssembler::call_VM(Register oop_result,
- address entry_point,
- bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
- call_VM_helper(oop_result, entry_point, 0, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- address entry_point,
- Register arg_1,
- bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
- pass_arg1(this, arg_1);
- call_VM_helper(oop_result, entry_point, 1, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- address entry_point,
- Register arg_1,
- Register arg_2,
- bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
-
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
-
- pass_arg2(this, arg_2);
- pass_arg1(this, arg_1);
- call_VM_helper(oop_result, entry_point, 2, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- address entry_point,
- Register arg_1,
- Register arg_2,
- Register arg_3,
- bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
-
- LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
- LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
- pass_arg3(this, arg_3);
-
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
-
- pass_arg1(this, arg_1);
- call_VM_helper(oop_result, entry_point, 3, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments,
- bool check_exceptions) {
- Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
- call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1,
- bool check_exceptions) {
- pass_arg1(this, arg_1);
- call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1,
- Register arg_2,
- bool check_exceptions) {
-
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- pass_arg1(this, arg_1);
- call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
-}
-
-void MacroAssembler::call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1,
- Register arg_2,
- Register arg_3,
- bool check_exceptions) {
- LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
- LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
- pass_arg3(this, arg_3);
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- pass_arg1(this, arg_1);
- call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
-}
-
-void MacroAssembler::super_call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments,
- bool check_exceptions) {
- Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
- MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
-}
-
-void MacroAssembler::super_call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1,
- bool check_exceptions) {
- pass_arg1(this, arg_1);
- super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
-}
-
-void MacroAssembler::super_call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1,
- Register arg_2,
- bool check_exceptions) {
-
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- pass_arg1(this, arg_1);
- super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
-}
-
-void MacroAssembler::super_call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1,
- Register arg_2,
- Register arg_3,
- bool check_exceptions) {
- LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
- LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
- pass_arg3(this, arg_3);
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- pass_arg1(this, arg_1);
- super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
-}
-
-void MacroAssembler::call_VM_base(Register oop_result,
- Register java_thread,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments,
- bool check_exceptions) {
- // determine java_thread register
- if (!java_thread->is_valid()) {
-#ifdef _LP64
- java_thread = r15_thread;
-#else
- java_thread = rdi;
- get_thread(java_thread);
-#endif // LP64
- }
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = rsp;
- }
- // debugging support
- assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
- LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
-#ifdef ASSERT
- // TraceBytecodes does not use r12 but saves it over the call, so don't verify
- // r12 is the heapbase.
- LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
-#endif // ASSERT
-
- assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
- assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
-
- // push java thread (becomes first argument of C function)
-
- NOT_LP64(push(java_thread); number_of_arguments++);
- LP64_ONLY(mov(c_rarg0, r15_thread));
-
- // set last Java frame before call
- assert(last_java_sp != rbp, "can't use ebp/rbp");
-
- // Only interpreter should have to set fp
- set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
-
- // do the call, remove parameters
- MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
-
- // restore the thread (cannot use the pushed argument since arguments
- // may be overwritten by C code generated by an optimizing compiler);
- // however we can use the register value directly if it is callee saved.
- if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
- // rdi & rsi (also r15) are callee saved -> nothing to do
-#ifdef ASSERT
- guarantee(java_thread != rax, "change this code");
- push(rax);
- { Label L;
- get_thread(rax);
- cmpptr(java_thread, rax);
- jcc(Assembler::equal, L);
- STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
- bind(L);
- }
- pop(rax);
-#endif
- } else {
- get_thread(java_thread);
- }
- // reset last Java frame
- // Only interpreter should have to clear fp
- reset_last_Java_frame(java_thread, true, false);
-
-#ifndef CC_INTERP
- // C++ interp handles this in the interpreter
- check_and_handle_popframe(java_thread);
- check_and_handle_earlyret(java_thread);
-#endif /* CC_INTERP */
-
- if (check_exceptions) {
- // check for pending exceptions (java_thread is set upon return)
- cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
-#ifndef _LP64
- jump_cc(Assembler::notEqual,
- RuntimeAddress(StubRoutines::forward_exception_entry()));
-#else
- // This used to conditionally jump to forward_exception; however, if we
- // relocate, it is possible that the branch will not reach. So we jump around
- // it so the target can always be reached.
-
- Label ok;
- jcc(Assembler::equal, ok);
- jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
- bind(ok);
-#endif // LP64
- }
-
- // get oop result if there is one and reset the value in the thread
- if (oop_result->is_valid()) {
- get_vm_result(oop_result, java_thread);
- }
-}
-
-void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
-
- // Calculate the value for last_Java_sp
- // somewhat subtle. call_VM does an intermediate call
- // which places a return address on the stack just under the
- // stack pointer as the user finished with it. This allows
- // us to retrieve last_Java_pc from last_Java_sp[-1].
- // On 32bit we then have to push additional args on the stack to accomplish
- // the actual requested call. On 64bit call_VM can only use register args,
- // so the only extra space is the return address that call_VM created.
- // This hopefully explains the calculations here.
-
-#ifdef _LP64
- // We've pushed one address, correct last_Java_sp
- lea(rax, Address(rsp, wordSize));
-#else
- lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
-#endif // LP64
-
- call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
-
-}
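
The comment above reduces to a small address calculation. Stated as a formula, with the word size and the 32/64-bit split passed in explicitly for illustration only:

#include <cstdint>

// last_Java_sp as computed by call_VM_helper: skip the return address pushed by
// the intermediate call, plus (on 32-bit only) the arguments pushed on the stack.
uintptr_t last_java_sp_sketch(uintptr_t rsp, int number_of_arguments,
                              bool lp64, int word_size) {
  return lp64 ? rsp + word_size
              : rsp + (1 + number_of_arguments) * (uintptr_t)word_size;
}
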
-
-void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
- call_VM_leaf_base(entry_point, number_of_arguments);
-}
-
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
- pass_arg0(this, arg_0);
- call_VM_leaf(entry_point, 1);
-}
-
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
-
- LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
- pass_arg1(this, arg_1);
- pass_arg0(this, arg_0);
- call_VM_leaf(entry_point, 2);
-}
-
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
- LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
- pass_arg1(this, arg_1);
- pass_arg0(this, arg_0);
- call_VM_leaf(entry_point, 3);
-}
-
-void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
- pass_arg0(this, arg_0);
- MacroAssembler::call_VM_leaf_base(entry_point, 1);
-}
-
-void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
-
- LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
- pass_arg1(this, arg_1);
- pass_arg0(this, arg_0);
- MacroAssembler::call_VM_leaf_base(entry_point, 2);
-}
-
-void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
- LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
- pass_arg1(this, arg_1);
- pass_arg0(this, arg_0);
- MacroAssembler::call_VM_leaf_base(entry_point, 3);
-}
-
-void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
- LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
- LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
- LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
- pass_arg3(this, arg_3);
- LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
- LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
- pass_arg2(this, arg_2);
- LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
- pass_arg1(this, arg_1);
- pass_arg0(this, arg_0);
- MacroAssembler::call_VM_leaf_base(entry_point, 4);
-}
-
-void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
- movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
- movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
- verify_oop(oop_result, "broken oop in call_VM_base");
-}
-
-void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
- movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
- movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
-}
-
-void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
-}
-
-void MacroAssembler::check_and_handle_popframe(Register java_thread) {
-}
-
-void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
- if (reachable(src1)) {
- cmpl(as_Address(src1), imm);
- } else {
- lea(rscratch1, src1);
- cmpl(Address(rscratch1, 0), imm);
- }
-}
-
-void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
- assert(!src2.is_lval(), "use cmpptr");
- if (reachable(src2)) {
- cmpl(src1, as_Address(src2));
- } else {
- lea(rscratch1, src2);
- cmpl(src1, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::cmp32(Register src1, int32_t imm) {
- Assembler::cmpl(src1, imm);
-}
-
-void MacroAssembler::cmp32(Register src1, Address src2) {
- Assembler::cmpl(src1, src2);
-}
-
-void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
- ucomisd(opr1, opr2);
-
- Label L;
- if (unordered_is_less) {
- movl(dst, -1);
- jcc(Assembler::parity, L);
- jcc(Assembler::below , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- increment(dst);
- } else { // unordered is greater
- movl(dst, 1);
- jcc(Assembler::parity, L);
- jcc(Assembler::above , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- decrementl(dst);
- }
- bind(L);
-}
-
-void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
- ucomiss(opr1, opr2);
-
- Label L;
- if (unordered_is_less) {
- movl(dst, -1);
- jcc(Assembler::parity, L);
- jcc(Assembler::below , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- increment(dst);
- } else { // unordered is greater
- movl(dst, 1);
- jcc(Assembler::parity, L);
- jcc(Assembler::above , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- decrementl(dst);
- }
- bind(L);
-}
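
Both helpers map an ordered/unordered SSE compare onto the -1/0/1 result Java's fcmpl and fcmpg expect; the NaN handling is the only subtle part. A C++ sketch of the semantics, not of the generated code:

#include <cmath>

// NaN comparisons are "unordered" and fold to -1 or +1 depending on the flag,
// which is exactly how fcmpl and fcmpg differ.
int cmp2int_sketch(double a, double b, bool unordered_is_less) {
  if (std::isnan(a) || std::isnan(b)) return unordered_is_less ? -1 : 1;
  if (a < b) return -1;
  if (a > b) return  1;
  return 0;
}
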
-
-
-void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
- if (reachable(src1)) {
- cmpb(as_Address(src1), imm);
- } else {
- lea(rscratch1, src1);
- cmpb(Address(rscratch1, 0), imm);
- }
-}
-
-void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
-#ifdef _LP64
- if (src2.is_lval()) {
- movptr(rscratch1, src2);
- Assembler::cmpq(src1, rscratch1);
- } else if (reachable(src2)) {
- cmpq(src1, as_Address(src2));
- } else {
- lea(rscratch1, src2);
- Assembler::cmpq(src1, Address(rscratch1, 0));
- }
-#else
- if (src2.is_lval()) {
- cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
- } else {
- cmpl(src1, as_Address(src2));
- }
-#endif // _LP64
-}
-
-void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
- assert(src2.is_lval(), "not a mem-mem compare");
-#ifdef _LP64
- // moves src2's literal address
- movptr(rscratch1, src2);
- Assembler::cmpq(src1, rscratch1);
-#else
- cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
-#endif // _LP64
-}
-
-void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
- if (reachable(adr)) {
- if (os::is_MP())
- lock();
- cmpxchgptr(reg, as_Address(adr));
- } else {
- lea(rscratch1, adr);
- if (os::is_MP())
- lock();
- cmpxchgptr(reg, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
- LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
-}
-
-void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::comisd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::comisd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::comiss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::comiss(dst, Address(rscratch1, 0));
- }
-}
-
-
-void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
- Condition negated_cond = negate_condition(cond);
- Label L;
- jcc(negated_cond, L);
- atomic_incl(counter_addr);
- bind(L);
-}
-
-int MacroAssembler::corrected_idivl(Register reg) {
- // Full implementation of Java idiv and irem; checks for
- // special case as described in JVM spec., p.243 & p.271.
- // The function returns the (pc) offset of the idivl
- // instruction - may be needed for implicit exceptions.
- //
- // normal case special case
- //
- // input : rax,: dividend min_int
- // reg: divisor (may not be rax,/rdx) -1
- //
- // output: rax,: quotient (= rax, idiv reg) min_int
- // rdx: remainder (= rax, irem reg) 0
- assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
- const int min_int = 0x80000000;
- Label normal_case, special_case;
-
- // check for special case
- cmpl(rax, min_int);
- jcc(Assembler::notEqual, normal_case);
- xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
- cmpl(reg, -1);
- jcc(Assembler::equal, special_case);
-
- // handle normal case
- bind(normal_case);
- cdql();
- int idivl_offset = offset();
- idivl(reg);
-
- // normal and special case exit
- bind(special_case);
-
- return idivl_offset;
-}
-
-
-
-void MacroAssembler::decrementl(Register reg, int value) {
- if (value == min_jint) {subl(reg, value) ; return; }
- if (value < 0) { incrementl(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decl(reg) ; return; }
- /* else */ { subl(reg, value) ; return; }
-}
-
-void MacroAssembler::decrementl(Address dst, int value) {
- if (value == min_jint) {subl(dst, value) ; return; }
- if (value < 0) { incrementl(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decl(dst) ; return; }
- /* else */ { subl(dst, value) ; return; }
-}
-
-void MacroAssembler::division_with_shift (Register reg, int shift_value) {
- assert (shift_value > 0, "illegal shift value");
- Label _is_positive;
- testl (reg, reg);
- jcc (Assembler::positive, _is_positive);
- int offset = (1 << shift_value) - 1 ;
-
- if (offset == 1) {
- incrementl(reg);
- } else {
- addl(reg, offset);
- }
-
- bind (_is_positive);
- sarl(reg, shift_value);
-}
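-
-// Editor's note: an illustrative sketch (not HotSpot code) of what
-// division_with_shift does: biasing a negative dividend by 2^shift - 1 before
-// the arithmetic shift makes the result truncate toward zero, matching Java
-// semantics for signed division by a power of two.
-static inline int divide_by_pow2_model(int x, int shift) {
-  if (x < 0) {
-    x += (1 << shift) - 1;  // bias negative values
-  }
-  return x >> shift;        // arithmetic right shift, as x86 compilers provide
-}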
-
-void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::divsd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::divsd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::divss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::divss(dst, Address(rscratch1, 0));
- }
-}
-
-// !defined(COMPILER2) is because of stupid core builds
-#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
-void MacroAssembler::empty_FPU_stack() {
- if (VM_Version::supports_mmx()) {
- emms();
- } else {
- for (int i = 8; i-- > 0; ) ffree(i);
- }
-}
-#endif // !LP64 || C1 || !C2
-
-
-// Defines obj, preserves var_size_in_bytes
-void MacroAssembler::eden_allocate(Register obj,
- Register var_size_in_bytes,
- int con_size_in_bytes,
- Register t1,
- Label& slow_case) {
- assert(obj == rax, "obj must be in rax, for cmpxchg");
- assert_different_registers(obj, var_size_in_bytes, t1);
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
- jmp(slow_case);
- } else {
- Register end = t1;
- Label retry;
- bind(retry);
- ExternalAddress heap_top((address) Universe::heap()->top_addr());
- movptr(obj, heap_top);
- if (var_size_in_bytes == noreg) {
- lea(end, Address(obj, con_size_in_bytes));
- } else {
- lea(end, Address(obj, var_size_in_bytes, Address::times_1));
- }
- // if end < obj then we wrapped around => object too long => slow case
- cmpptr(end, obj);
- jcc(Assembler::below, slow_case);
- cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
- jcc(Assembler::above, slow_case);
- // Compare obj with the top addr, and if still equal, store the new top addr in
- // end at the address of the top addr pointer. Sets ZF if was equal, and clears
- // it otherwise. Use lock prefix for atomicity on MPs.
- locked_cmpxchgptr(end, heap_top);
- jcc(Assembler::notEqual, retry);
- }
-}
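-
-// Editor's note: the retry loop above is a lock-free bump allocation; here is
-// a rough std::atomic model of the same idea. heap_top and heap_end stand in
-// for Universe::heap()->top_addr()/end_addr(); this is not HotSpot code.
-#include <atomic>
-#include <cstddef>
-static inline char* eden_allocate_model(std::atomic<char*>& heap_top,
-                                        char* heap_end, size_t size) {
-  char* obj = heap_top.load();
-  for (;;) {
-    char* end = obj + size;
-    if (end < obj || end > heap_end) return NULL;  // wrapped or heap exhausted: slow case
-    // cmpxchg: publish the new top only if nobody else moved it meanwhile
-    if (heap_top.compare_exchange_weak(obj, end)) return obj;
-    // on failure, obj has been reloaded with the current top; retry
-  }
-}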
-
-void MacroAssembler::enter() {
- push(rbp);
- mov(rbp, rsp);
-}
-
-// A 5 byte nop that is safe for patching (see patch_verified_entry)
-void MacroAssembler::fat_nop() {
- if (UseAddressNop) {
- addr_nop_5();
- } else {
- emit_byte(0x26); // es:
- emit_byte(0x2e); // cs:
- emit_byte(0x64); // fs:
- emit_byte(0x65); // gs:
- emit_byte(0x90);
- }
-}
-
-void MacroAssembler::fcmp(Register tmp) {
- fcmp(tmp, 1, true, true);
-}
-
-void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
- assert(!pop_right || pop_left, "usage error");
- if (VM_Version::supports_cmov()) {
- assert(tmp == noreg, "unneeded temp");
- if (pop_left) {
- fucomip(index);
- } else {
- fucomi(index);
- }
- if (pop_right) {
- fpop();
- }
- } else {
- assert(tmp != noreg, "need temp");
- if (pop_left) {
- if (pop_right) {
- fcompp();
- } else {
- fcomp(index);
- }
- } else {
- fcom(index);
- }
- // convert FPU condition into eflags condition via rax
- save_rax(tmp);
- fwait(); fnstsw_ax();
- sahf();
- restore_rax(tmp);
- }
- // condition codes set as follows:
- //
- // CF (corresponds to C0) if x < y
- // PF (corresponds to C2) if unordered
- // ZF (corresponds to C3) if x = y
-}
-
-void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
- fcmp2int(dst, unordered_is_less, 1, true, true);
-}
-
-void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
- fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
- Label L;
- if (unordered_is_less) {
- movl(dst, -1);
- jcc(Assembler::parity, L);
- jcc(Assembler::below , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- increment(dst);
- } else { // unordered is greater
- movl(dst, 1);
- jcc(Assembler::parity, L);
- jcc(Assembler::above , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- decrementl(dst);
- }
- bind(L);
-}
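-
-// Editor's note: what fcmp2int produces, restated as plain C++ (illustrative
-// only). A NaN operand makes the comparison unordered (PF set) and takes the
-// direction selected by unordered_is_less.
-static inline int fcmp2int_model(double x, double y, bool unordered_is_less) {
-  if (x != x || y != y) return unordered_is_less ? -1 : 1;  // unordered
-  if (x < y)  return -1;
-  if (x == y) return  0;
-  return 1;
-}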
-
-void MacroAssembler::fld_d(AddressLiteral src) {
- fld_d(as_Address(src));
-}
-
-void MacroAssembler::fld_s(AddressLiteral src) {
- fld_s(as_Address(src));
-}
-
-void MacroAssembler::fld_x(AddressLiteral src) {
- Assembler::fld_x(as_Address(src));
-}
-
-void MacroAssembler::fldcw(AddressLiteral src) {
- Assembler::fldcw(as_Address(src));
-}
-
-void MacroAssembler::pow_exp_core_encoding() {
- // kills rax, rcx, rdx
- subptr(rsp,sizeof(jdouble));
- // computes 2^X. Stack: X ...
- // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
- // keep it on the thread's stack to compute 2^int(X) later,
- // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1.
- // The final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
- fld_s(0); // Stack: X X ...
- frndint(); // Stack: int(X) X ...
- fsuba(1); // Stack: int(X) X-int(X) ...
- fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
- f2xm1(); // Stack: 2^(X-int(X))-1 ...
- fld1(); // Stack: 1 2^(X-int(X))-1 ...
- faddp(1); // Stack: 2^(X-int(X))
- // computes 2^(int(X)): add exponent bias (1023) to int(X), then
- // shift int(X)+1023 to exponent position.
- // The exponent is limited to 11 bits: if int(X)+1023 does not fit in 11
- // bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
- // values, so detect them and set the result to NaN as well.
- movl(rax,Address(rsp,0));
- movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
- addl(rax, 1023);
- movl(rdx,rax);
- shll(rax,20);
- // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
- addl(rdx,1);
- // Check that 1 < int(X)+1023+1 < 2048
- // in 3 steps:
- // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
- // 2- (int(X)+1023+1)&-2048 != 0
- // 3- (int(X)+1023+1)&-2048 != 1
- // Do 2- first because addl just updated the flags.
- cmov32(Assembler::equal,rax,rcx);
- cmpl(rdx,1);
- cmov32(Assembler::equal,rax,rcx);
- testl(rdx,rcx);
- cmov32(Assembler::notEqual,rax,rcx);
- movl(Address(rsp,4),rax);
- movl(Address(rsp,0),0);
- fmul_d(Address(rsp,0)); // Stack: 2^X ...
- addptr(rsp,sizeof(jdouble));
-}
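-
-// Editor's note: a host-side sketch (not HotSpot code) of the 2^int(X)
-// encoding built above: place the biased exponent int(X)+1023 into the
-// exponent field of an IEEE-754 double (bits 20..30 of the high 32-bit word)
-// with a zero mantissa. Valid for -1022 <= n <= 1023.
-#include <stdint.h>
-#include <string.h>
-static inline double pow2_of_int_model(int n) {
-  uint64_t bits = (uint64_t)(uint32_t)(n + 1023) << 52;  // high word == (n+1023) << 20
-  double d;
-  memcpy(&d, &bits, sizeof(d));
-  return d;
-}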
-
-void MacroAssembler::increase_precision() {
- subptr(rsp, BytesPerWord);
- fnstcw(Address(rsp, 0));
- movl(rax, Address(rsp, 0));
- orl(rax, 0x300);
- push(rax);
- fldcw(Address(rsp, 0));
- pop(rax);
-}
-
-void MacroAssembler::restore_precision() {
- fldcw(Address(rsp, 0));
- addptr(rsp, BytesPerWord);
-}
-
-void MacroAssembler::fast_pow() {
- // computes X^Y = 2^(Y * log2(X))
- // if fast computation is not possible, result is NaN. Requires
- // fallback from user of this macro.
- // increase precision for intermediate steps of the computation
- increase_precision();
- fyl2x(); // Stack: (Y*log2(X)) ...
- pow_exp_core_encoding(); // Stack: 2^(Y*log2(X)) = X^Y ...
- restore_precision();
-}
-
-void MacroAssembler::fast_exp() {
- // computes exp(X) = 2^(X * log2(e))
- // if fast computation is not possible, result is NaN. Requires
- // fallback from user of this macro.
- // increase precision for intermediate steps of the computation
- increase_precision();
- fldl2e(); // Stack: log2(e) X ...
- fmulp(1); // Stack: (X*log2(e)) ...
- pow_exp_core_encoding(); // Stack: exp(X) ...
- restore_precision();
-}
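-
-// Editor's note: the identities the two helpers above rely on, restated with
-// standard C++ math functions (illustrative, not HotSpot code):
-//   x^y    == 2^(y * log2(x))  for x > 0            (fast_pow: fyl2x, then 2^TOS)
-//   exp(x) == 2^(x * log2(e)) == 2^(x / ln 2)       (fast_exp: fldl2e, fmulp, then 2^TOS)
-#include <math.h>
-static inline double fast_pow_model(double x, double y) { return exp2(y * log2(x)); }  // x > 0
-static inline double fast_exp_model(double x)           { return exp2(x / log(2.0)); }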
-
-void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
- // kills rax, rcx, rdx
- // pow and exp need 2 extra registers on the fpu stack.
- Label slow_case, done;
- Register tmp = noreg;
- if (!VM_Version::supports_cmov()) {
- // fcmp needs a temporary, so preserve rdx
- tmp = rdx;
- }
- Register tmp2 = rax;
- Register tmp3 = rcx;
-
- if (is_exp) {
- // Stack: X
- fld_s(0); // duplicate argument for runtime call. Stack: X X
- fast_exp(); // Stack: exp(X) X
- fcmp(tmp, 0, false, false); // Stack: exp(X) X
- // exp(X) not equal to itself: exp(X) is NaN, go to slow case.
- jcc(Assembler::parity, slow_case);
- // get rid of duplicate argument. Stack: exp(X)
- if (num_fpu_regs_in_use > 0) {
- fxch();
- fpop();
- } else {
- ffree(1);
- }
- jmp(done);
- } else {
- // Stack: X Y
- Label x_negative, y_odd;
-
- fldz(); // Stack: 0 X Y
- fcmp(tmp, 1, true, false); // Stack: X Y
- jcc(Assembler::above, x_negative);
-
- // X >= 0
-
- fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
- fld_s(1); // Stack: X Y X Y
- fast_pow(); // Stack: X^Y X Y
- fcmp(tmp, 0, false, false); // Stack: X^Y X Y
- // X^Y not equal to itself: X^Y is NaN, go to slow case.
- jcc(Assembler::parity, slow_case);
- // get rid of duplicate arguments. Stack: X^Y
- if (num_fpu_regs_in_use > 0) {
- fxch(); fpop();
- fxch(); fpop();
- } else {
- ffree(2);
- ffree(1);
- }
- jmp(done);
-
- // X <= 0
- bind(x_negative);
-
- fld_s(1); // Stack: Y X Y
- frndint(); // Stack: int(Y) X Y
- fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
- jcc(Assembler::notEqual, slow_case);
-
- subptr(rsp, 8);
-
- // For X^Y, when X < 0, Y has to be an integer and the final
- // result depends on whether it's odd or even. We just checked
- // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
- // integer to test its parity. If int(Y) is huge and doesn't fit
- // in the 64 bit integer range, the integer indefinite value will
- // end up in the gp registers. Huge numbers are all even, and the
- // integer indefinite value is itself even, so this is fine.
-
-#ifdef ASSERT
- // Let's check we don't end up with an integer indefinite number
- // when not expected. First test for huge numbers: check whether
- // int(Y)+1 == int(Y) which is true for very large numbers and
- // those are all even. A 64 bit integer is guaranteed to not
- // overflow for numbers where y+1 != y (when precision is set to
- // double precision).
- Label y_not_huge;
-
- fld1(); // Stack: 1 int(Y) X Y
- fadd(1); // Stack: 1+int(Y) int(Y) X Y
-
-#ifdef _LP64
- // trip to memory to force the precision down from double extended
- // precision
- fstp_d(Address(rsp, 0));
- fld_d(Address(rsp, 0));
-#endif
-
- fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
-#endif
-
- // move int(Y) as 64 bit integer to thread's stack
- fistp_d(Address(rsp,0)); // Stack: X Y
-
-#ifdef ASSERT
- jcc(Assembler::notEqual, y_not_huge);
-
- // Y is huge so we know it's even. It may not fit in a 64 bit
- // integer and we don't want the debug code below to see the
- // integer indefinite value so overwrite int(Y) on the thread's
- // stack with 0.
- movl(Address(rsp, 0), 0);
- movl(Address(rsp, 4), 0);
-
- bind(y_not_huge);
-#endif
-
- fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
- fld_s(1); // Stack: X Y X Y
- fabs(); // Stack: abs(X) Y X Y
- fast_pow(); // Stack: abs(X)^Y X Y
- fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
- // abs(X)^Y not equal to itself: abs(X)^Y is NaN, go to slow case.
-
- pop(tmp2);
- NOT_LP64(pop(tmp3));
- jcc(Assembler::parity, slow_case);
-
-#ifdef ASSERT
- // Check that int(Y) is not integer indefinite value (int
- // overflow). Shouldn't happen because for values that would
- // overflow, 1+int(Y) == int(Y), which was tested earlier.
-#ifndef _LP64
- {
- Label integer;
- testl(tmp2, tmp2);
- jcc(Assembler::notZero, integer);
- cmpl(tmp3, 0x80000000);
- jcc(Assembler::notZero, integer);
- STOP("integer indefinite value shouldn't be seen here");
- bind(integer);
- }
-#else
- {
- Label integer;
- mov(tmp3, tmp2); // preserve tmp2 for parity check below
- shlq(tmp3, 1);
- jcc(Assembler::carryClear, integer);
- jcc(Assembler::notZero, integer);
- STOP("integer indefinite value shouldn't be seen here");
- bind(integer);
- }
-#endif
-#endif
-
- // get rid of duplicate arguments. Stack: X^Y
- if (num_fpu_regs_in_use > 0) {
- fxch(); fpop();
- fxch(); fpop();
- } else {
- ffree(2);
- ffree(1);
- }
-
- testl(tmp2, 1);
- jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
- // X <= 0, Y odd: X^Y = -abs(X)^Y
-
- fchs(); // Stack: -abs(X)^Y Y
- jmp(done);
- }
-
- // slow case: runtime call
- bind(slow_case);
-
- fpop(); // pop incorrect result or int(Y)
-
- fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
- is_exp ? 1 : 2, num_fpu_regs_in_use);
-
- // Come here with result in F-TOS
- bind(done);
-}
-
-void MacroAssembler::fpop() {
- ffree();
- fincstp();
-}
-
-void MacroAssembler::fremr(Register tmp) {
- save_rax(tmp);
- { Label L;
- bind(L);
- fprem();
- fwait(); fnstsw_ax();
-#ifdef _LP64
- testl(rax, 0x400);
- jcc(Assembler::notEqual, L);
-#else
- sahf();
- jcc(Assembler::parity, L);
-#endif // _LP64
- }
- restore_rax(tmp);
- // Result is in ST0.
- // Note: fxch & fpop to get rid of ST1
- // (otherwise FPU stack could overflow eventually)
- fxch(1);
- fpop();
-}
-
-
-void MacroAssembler::incrementl(AddressLiteral dst) {
- if (reachable(dst)) {
- incrementl(as_Address(dst));
- } else {
- lea(rscratch1, dst);
- incrementl(Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::incrementl(ArrayAddress dst) {
- incrementl(as_Address(dst));
-}
-
-void MacroAssembler::incrementl(Register reg, int value) {
- if (value == min_jint) {addl(reg, value) ; return; }
- if (value < 0) { decrementl(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incl(reg) ; return; }
- /* else */ { addl(reg, value) ; return; }
-}
-
-void MacroAssembler::incrementl(Address dst, int value) {
- if (value == min_jint) {addl(dst, value) ; return; }
- if (value < 0) { decrementl(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incl(dst) ; return; }
- /* else */ { addl(dst, value) ; return; }
-}
-
-void MacroAssembler::jump(AddressLiteral dst) {
- if (reachable(dst)) {
- jmp_literal(dst.target(), dst.rspec());
- } else {
- lea(rscratch1, dst);
- jmp(rscratch1);
- }
-}
-
-void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
- if (reachable(dst)) {
- InstructionMark im(this);
- relocate(dst.reloc());
- const int short_size = 2;
- const int long_size = 6;
- int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
- if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(offs - long_size);
- }
- } else {
-#ifdef ASSERT
- warning("reversing conditional branch");
-#endif /* ASSERT */
- Label skip;
- jccb(reverse[cc], skip);
- lea(rscratch1, dst);
- Assembler::jmp(rscratch1);
- bind(skip);
- }
-}
-
-void MacroAssembler::ldmxcsr(AddressLiteral src) {
- if (reachable(src)) {
- Assembler::ldmxcsr(as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::ldmxcsr(Address(rscratch1, 0));
- }
-}
-
-int MacroAssembler::load_signed_byte(Register dst, Address src) {
- int off;
- if (LP64_ONLY(true ||) VM_Version::is_P6()) {
- off = offset();
- movsbl(dst, src); // movsxb
- } else {
- off = load_unsigned_byte(dst, src);
- shll(dst, 24);
- sarl(dst, 24);
- }
- return off;
-}
-
-// Note: load_signed_short used to be called load_signed_word.
-// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
-// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
-// The term "word" in HotSpot means a 32- or 64-bit machine word.
-int MacroAssembler::load_signed_short(Register dst, Address src) {
- int off;
- if (LP64_ONLY(true ||) VM_Version::is_P6()) {
- // This is dubious to me since it seems safe to do a signed 16 => 64 bit
- // version but this is what 64bit has always done. This seems to imply
- // that users are only using 32bits worth.
- off = offset();
- movswl(dst, src); // movsxw
- } else {
- off = load_unsigned_short(dst, src);
- shll(dst, 16);
- sarl(dst, 16);
- }
- return off;
-}
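-
-// Editor's note: the pre-P6 fallback above (and in sign_extend_byte/short
-// further down) sign-extends without movsx by shifting the value to the top
-// of the register and arithmetic-shifting it back. A C++ model, assuming the
-// arithmetic right shift of signed ints that x86 compilers provide:
-static inline int sign_extend16_model(int x) { return (int)((unsigned)x << 16) >> 16; }
-static inline int sign_extend8_model (int x) { return (int)((unsigned)x << 24) >> 24; }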
-
-int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
- // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
- // and "3.9 Partial Register Penalties", p. 22).
- int off;
- if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
- off = offset();
- movzbl(dst, src); // movzxb
- } else {
- xorl(dst, dst);
- off = offset();
- movb(dst, src);
- }
- return off;
-}
-
-// Note: load_unsigned_short used to be called load_unsigned_word.
-int MacroAssembler::load_unsigned_short(Register dst, Address src) {
- // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
- // and "3.9 Partial Register Penalties", p. 22).
- int off;
- if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
- off = offset();
- movzwl(dst, src); // movzxw
- } else {
- xorl(dst, dst);
- off = offset();
- movw(dst, src);
- }
- return off;
-}
-
-void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
- switch (size_in_bytes) {
-#ifndef _LP64
- case 8:
- assert(dst2 != noreg, "second dest register required");
- movl(dst, src);
- movl(dst2, src.plus_disp(BytesPerInt));
- break;
-#else
- case 8: movq(dst, src); break;
-#endif
- case 4: movl(dst, src); break;
- case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
- case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
- default: ShouldNotReachHere();
- }
-}
-
-void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
- switch (size_in_bytes) {
-#ifndef _LP64
- case 8:
- assert(src2 != noreg, "second source register required");
- movl(dst, src);
- movl(dst.plus_disp(BytesPerInt), src2);
- break;
-#else
- case 8: movq(dst, src); break;
-#endif
- case 4: movl(dst, src); break;
- case 2: movw(dst, src); break;
- case 1: movb(dst, src); break;
- default: ShouldNotReachHere();
- }
-}
-
-void MacroAssembler::mov32(AddressLiteral dst, Register src) {
- if (reachable(dst)) {
- movl(as_Address(dst), src);
- } else {
- lea(rscratch1, dst);
- movl(Address(rscratch1, 0), src);
- }
-}
-
-void MacroAssembler::mov32(Register dst, AddressLiteral src) {
- if (reachable(src)) {
- movl(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movl(dst, Address(rscratch1, 0));
- }
-}
-
-// C++ bool manipulation
-
-void MacroAssembler::movbool(Register dst, Address src) {
- if(sizeof(bool) == 1)
- movb(dst, src);
- else if(sizeof(bool) == 2)
- movw(dst, src);
- else if(sizeof(bool) == 4)
- movl(dst, src);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::movbool(Address dst, bool boolconst) {
- if(sizeof(bool) == 1)
- movb(dst, (int) boolconst);
- else if(sizeof(bool) == 2)
- movw(dst, (int) boolconst);
- else if(sizeof(bool) == 4)
- movl(dst, (int) boolconst);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::movbool(Address dst, Register src) {
- if(sizeof(bool) == 1)
- movb(dst, src);
- else if(sizeof(bool) == 2)
- movw(dst, src);
- else if(sizeof(bool) == 4)
- movl(dst, src);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::movbyte(ArrayAddress dst, int src) {
- movb(as_Address(dst), src);
-}
-
-void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- movdl(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movdl(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- movq(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movq(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- if (UseXmmLoadAndClearUpper) {
- movsd (dst, as_Address(src));
- } else {
- movlpd(dst, as_Address(src));
- }
- } else {
- lea(rscratch1, src);
- if (UseXmmLoadAndClearUpper) {
- movsd (dst, Address(rscratch1, 0));
- } else {
- movlpd(dst, Address(rscratch1, 0));
- }
- }
-}
-
-void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- movss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::movptr(Register dst, Register src) {
- LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
-}
-
-void MacroAssembler::movptr(Register dst, Address src) {
- LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
-}
-
-// src should NEVER be a real pointer. Use AddressLiteral for true pointers
-void MacroAssembler::movptr(Register dst, intptr_t src) {
- LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
-}
-
-void MacroAssembler::movptr(Address dst, Register src) {
- LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
-}
-
-void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::movdqu(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::movdqu(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::movsd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::movsd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::movss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::movss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::mulsd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::mulsd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::mulss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::mulss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::null_check(Register reg, int offset) {
- if (needs_explicit_null_check(offset)) {
- // provoke OS NULL exception if reg = NULL by
- // accessing M[reg] w/o changing any (non-CC) registers
- // NOTE: cmpl is plenty here to provoke a segv
- cmpptr(rax, Address(reg, 0));
- // Note: should probably use testl(rax, Address(reg, 0));
- // may be shorter code (however, this version of
- // testl needs to be implemented first)
- } else {
- // nothing to do, (later) access of M[reg + offset]
- // will provoke OS NULL exception if reg = NULL
- }
-}
-
-void MacroAssembler::os_breakpoint() {
- // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
- // (e.g., MSVC can't call ps() otherwise)
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::pop_CPU_state() {
- pop_FPU_state();
- pop_IU_state();
-}
-
-void MacroAssembler::pop_FPU_state() {
- NOT_LP64(frstor(Address(rsp, 0));)
- LP64_ONLY(fxrstor(Address(rsp, 0));)
- addptr(rsp, FPUStateSizeInWords * wordSize);
-}
-
-void MacroAssembler::pop_IU_state() {
- popa();
- LP64_ONLY(addq(rsp, 8));
- popf();
-}
-
-// Save Integer and Float state
-// Warning: Stack must be 16 byte aligned (64bit)
-void MacroAssembler::push_CPU_state() {
- push_IU_state();
- push_FPU_state();
-}
-
-void MacroAssembler::push_FPU_state() {
- subptr(rsp, FPUStateSizeInWords * wordSize);
-#ifndef _LP64
- fnsave(Address(rsp, 0));
- fwait();
-#else
- fxsave(Address(rsp, 0));
-#endif // LP64
-}
-
-void MacroAssembler::push_IU_state() {
- // Push flags first because pusha kills them
- pushf();
- // Make sure rsp stays 16-byte aligned
- LP64_ONLY(subq(rsp, 8));
- pusha();
-}
-
-void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
- // determine java_thread register
- if (!java_thread->is_valid()) {
- java_thread = rdi;
- get_thread(java_thread);
- }
- // we must set sp to zero to clear frame
- movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
- if (clear_fp) {
- movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
- }
-
- if (clear_pc)
- movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
-
-}
-
-void MacroAssembler::restore_rax(Register tmp) {
- if (tmp == noreg) pop(rax);
- else if (tmp != rax) mov(rax, tmp);
-}
-
-void MacroAssembler::round_to(Register reg, int modulus) {
- addptr(reg, modulus - 1);
- andptr(reg, -modulus);
-}
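-
-// Editor's note: round_to rounds reg up to the next multiple of 'modulus';
-// the add-and-mask trick requires modulus to be a power of two. In plain C++:
-#include <stdint.h>
-static inline intptr_t round_to_model(intptr_t x, intptr_t modulus) {
-  return (x + modulus - 1) & -modulus;
-}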
-
-void MacroAssembler::save_rax(Register tmp) {
- if (tmp == noreg) push(rax);
- else if (tmp != rax) mov(tmp, rax);
-}
-
-// Write serialization page so VM thread can do a pseudo remote membar.
-// We use the current thread pointer to calculate a thread specific
-// offset to write to within the page. This minimizes bus traffic
-// due to cache line collision.
-void MacroAssembler::serialize_memory(Register thread, Register tmp) {
- movl(tmp, thread);
- shrl(tmp, os::get_serialize_page_shift_count());
- andl(tmp, (os::vm_page_size() - sizeof(int)));
-
- Address index(noreg, tmp, Address::times_1);
- ExternalAddress page(os::get_memory_serialize_page());
-
- // Size of store must match masking code above
- movl(as_Address(ArrayAddress(page, index)), tmp);
-}
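-
-// Editor's note: a sketch of the per-thread offset computed above; page_shift
-// and page_size stand in for os::get_serialize_page_shift_count() and
-// os::vm_page_size(). The subsequent store goes to serialize_page + offset.
-#include <stdint.h>
-static inline int serialize_offset_model(uintptr_t thread, int page_shift, int page_size) {
-  return (int)((thread >> page_shift) & (page_size - sizeof(int)));
-}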
-
-// Calls to C land
-//
-// When entering C land, the rbp and rsp of the last Java frame have to be recorded
-// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
-// has to be reset to 0. This is required to allow proper stack traversal.
-void MacroAssembler::set_last_Java_frame(Register java_thread,
- Register last_java_sp,
- Register last_java_fp,
- address last_java_pc) {
- // determine java_thread register
- if (!java_thread->is_valid()) {
- java_thread = rdi;
- get_thread(java_thread);
- }
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = rsp;
- }
-
- // last_java_fp is optional
-
- if (last_java_fp->is_valid()) {
- movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
- }
-
- // last_java_pc is optional
-
- if (last_java_pc != NULL) {
- lea(Address(java_thread,
- JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
- InternalAddress(last_java_pc));
-
- }
- movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
-}
-
-void MacroAssembler::shlptr(Register dst, int imm8) {
- LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
-}
-
-void MacroAssembler::shrptr(Register dst, int imm8) {
- LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
-}
-
-void MacroAssembler::sign_extend_byte(Register reg) {
- if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
- movsbl(reg, reg); // movsxb
- } else {
- shll(reg, 24);
- sarl(reg, 24);
- }
-}
-
-void MacroAssembler::sign_extend_short(Register reg) {
- if (LP64_ONLY(true ||) VM_Version::is_P6()) {
- movswl(reg, reg); // movsxw
- } else {
- shll(reg, 16);
- sarl(reg, 16);
- }
-}
-
-void MacroAssembler::testl(Register dst, AddressLiteral src) {
- assert(reachable(src), "Address should be reachable");
- testl(dst, as_Address(src));
-}
-
-void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::sqrtsd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::sqrtsd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::sqrtss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::sqrtss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::subsd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::subsd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::subss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::subss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::ucomisd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::ucomisd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- Assembler::ucomiss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::ucomiss(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
- // Used in sign-bit flipping with aligned address.
- assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
- if (reachable(src)) {
- Assembler::xorpd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::xorpd(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
- // Used in sign-bit flipping with aligned address.
- assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
- if (reachable(src)) {
- Assembler::xorps(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::xorps(dst, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
- // Used in sign-bit flipping with aligned address.
- assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
- if (reachable(src)) {
- Assembler::pshufb(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::pshufb(dst, Address(rscratch1, 0));
- }
-}
-
-// AVX 3-operands instructions
-
-void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vaddsd(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vaddsd(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vaddss(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vaddss(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
- if (reachable(src)) {
- vandpd(dst, nds, as_Address(src), vector256);
- } else {
- lea(rscratch1, src);
- vandpd(dst, nds, Address(rscratch1, 0), vector256);
- }
-}
-
-void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
- if (reachable(src)) {
- vandps(dst, nds, as_Address(src), vector256);
- } else {
- lea(rscratch1, src);
- vandps(dst, nds, Address(rscratch1, 0), vector256);
- }
-}
-
-void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vdivsd(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vdivsd(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vdivss(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vdivss(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vmulsd(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vmulsd(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vmulss(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vmulss(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vsubsd(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vsubsd(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
- if (reachable(src)) {
- vsubss(dst, nds, as_Address(src));
- } else {
- lea(rscratch1, src);
- vsubss(dst, nds, Address(rscratch1, 0));
- }
-}
-
-void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
- if (reachable(src)) {
- vxorpd(dst, nds, as_Address(src), vector256);
- } else {
- lea(rscratch1, src);
- vxorpd(dst, nds, Address(rscratch1, 0), vector256);
- }
-}
-
-void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
- if (reachable(src)) {
- vxorps(dst, nds, as_Address(src), vector256);
- } else {
- lea(rscratch1, src);
- vxorps(dst, nds, Address(rscratch1, 0), vector256);
- }
-}
-
-
-//////////////////////////////////////////////////////////////////////////////////
-#ifndef SERIALGC
-
-void MacroAssembler::g1_write_barrier_pre(Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call) {
-
- // If expand_call is true then we expand the call_VM_leaf macro
- // directly to skip generating the check by
- // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
-
-#ifdef _LP64
- assert(thread == r15_thread, "must be");
-#endif // _LP64
-
- Label done;
- Label runtime;
-
- assert(pre_val != noreg, "check this code");
-
- if (obj != noreg) {
- assert_different_registers(obj, pre_val, tmp);
- assert(pre_val != rax, "check this code");
- }
-
- Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_active()));
- Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_index()));
- Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
- PtrQueue::byte_offset_of_buf()));
-
-
- // Is marking active?
- if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
- cmpl(in_progress, 0);
- } else {
- assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
- cmpb(in_progress, 0);
- }
- jcc(Assembler::equal, done);
-
- // Do we need to load the previous value?
- if (obj != noreg) {
- load_heap_oop(pre_val, Address(obj, 0));
- }
-
- // Is the previous value null?
- cmpptr(pre_val, (int32_t) NULL_WORD);
- jcc(Assembler::equal, done);
-
- // Can we store original value in the thread's buffer?
- // Is index == 0?
- // (The index field is typed as size_t.)
-
- movptr(tmp, index); // tmp := *index_adr
- cmpptr(tmp, 0); // tmp == 0?
- jcc(Assembler::equal, runtime); // If yes, goto runtime
-
- subptr(tmp, wordSize); // tmp := tmp - wordSize
- movptr(index, tmp); // *index_adr := tmp
- addptr(tmp, buffer); // tmp := tmp + *buffer_adr
-
- // Record the previous value
- movptr(Address(tmp, 0), pre_val);
- jmp(done);
-
- bind(runtime);
- // save the live input values
- if(tosca_live) push(rax);
-
- if (obj != noreg && obj != rax)
- push(obj);
-
- if (pre_val != rax)
- push(pre_val);
-
- // Calling the runtime using the regular call_VM_leaf mechanism generates
- // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
- // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
- //
- // If we are generating the pre-barrier without a frame (e.g. in the
- // intrinsified Reference.get() routine) then ebp might be pointing to
- // the caller frame and so this check will most likely fail at runtime.
- //
- // Expanding the call directly bypasses the generation of the check.
- // So when we do not have a full interpreter frame on the stack
- // expand_call should be passed true.
-
- NOT_LP64( push(thread); )
-
- if (expand_call) {
- LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
- pass_arg1(this, thread);
- pass_arg0(this, pre_val);
- MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
- } else {
- call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
- }
-
- NOT_LP64( pop(thread); )
-
- // restore the live input values
- if (pre_val != rax)
- pop(pre_val);
-
- if (obj != noreg && obj != rax)
- pop(obj);
-
- if(tosca_live) pop(rax);
-
- bind(done);
-}
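-
-// Editor's note: a self-contained model (not HotSpot API) of the SATB fast
-// path generated above. The queue index is a byte offset counting down toward
-// zero; once it reaches zero, the runtime call (SharedRuntime::g1_wb_pre)
-// takes over.
-#include <stddef.h>
-struct SatbQueueModel { bool active; size_t index; void** buf; };
-static void g1_wb_pre_model(void* pre_val) { (void)pre_val; }  // placeholder for the runtime call
-
-static inline void g1_pre_barrier_model(SatbQueueModel& q, void* pre_val) {
-  if (!q.active) return;                     // marking not active
-  if (pre_val == NULL) return;               // previous value null: nothing to record
-  if (q.index == 0) { g1_wb_pre_model(pre_val); return; }  // buffer full
-  q.index -= sizeof(void*);                  // wordSize
-  q.buf[q.index / sizeof(void*)] = pre_val;  // record the previous value
-}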
-
-void MacroAssembler::g1_write_barrier_post(Register store_addr,
- Register new_val,
- Register thread,
- Register tmp,
- Register tmp2) {
-#ifdef _LP64
- assert(thread == r15_thread, "must be");
-#endif // _LP64
-
- Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
- PtrQueue::byte_offset_of_index()));
- Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
- PtrQueue::byte_offset_of_buf()));
-
- BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- Label done;
- Label runtime;
-
- // Does store cross heap regions?
-
- movptr(tmp, store_addr);
- xorptr(tmp, new_val);
- shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
- jcc(Assembler::equal, done);
-
- // crosses regions, storing NULL?
-
- cmpptr(new_val, (int32_t) NULL_WORD);
- jcc(Assembler::equal, done);
-
- // storing region crossing non-NULL, is card already dirty?
-
- ExternalAddress cardtable((address) ct->byte_map_base);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-#ifdef _LP64
- const Register card_addr = tmp;
-
- movq(card_addr, store_addr);
- shrq(card_addr, CardTableModRefBS::card_shift);
-
- lea(tmp2, cardtable);
-
- // get the address of the card
- addq(card_addr, tmp2);
-#else
- const Register card_index = tmp;
-
- movl(card_index, store_addr);
- shrl(card_index, CardTableModRefBS::card_shift);
-
- Address index(noreg, card_index, Address::times_1);
- const Register card_addr = tmp;
- lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
-#endif
- cmpb(Address(card_addr, 0), 0);
- jcc(Assembler::equal, done);
-
- // storing a region crossing, non-NULL oop, card is clean.
- // dirty card and log.
-
- movb(Address(card_addr, 0), 0);
-
- cmpl(queue_index, 0);
- jcc(Assembler::equal, runtime);
- subl(queue_index, wordSize);
- movptr(tmp2, buffer);
-#ifdef _LP64
- movslq(rscratch1, queue_index);
- addq(tmp2, rscratch1);
- movq(Address(tmp2, 0), card_addr);
-#else
- addl(tmp2, queue_index);
- movl(Address(tmp2, 0), card_index);
-#endif
- jmp(done);
-
- bind(runtime);
- // save the live input values
- push(store_addr);
- push(new_val);
-#ifdef _LP64
- call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
-#else
- push(thread);
- call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
- pop(thread);
-#endif
- pop(new_val);
- pop(store_addr);
-
- bind(done);
-}
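-
-// Editor's note: the first test in the post-barrier above, as a sketch: two
-// addresses lie in the same heap region iff all bits above
-// HeapRegion::LogOfHRGrainBytes agree, so xor-then-shift yields zero.
-#include <stdint.h>
-static inline bool crosses_regions_model(uintptr_t store_addr, uintptr_t new_val,
-                                         int log_region_size) {
-  return ((store_addr ^ new_val) >> log_region_size) != 0;
-}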
-
-#endif // SERIALGC
-//////////////////////////////////////////////////////////////////////////////////
-
-
-void MacroAssembler::store_check(Register obj) {
- // Does a store check for the oop in register obj. The content of
- // register obj is destroyed afterwards.
- store_check_part_1(obj);
- store_check_part_2(obj);
-}
-
-void MacroAssembler::store_check(Register obj, Address dst) {
- store_check(obj);
-}
-
-
-// split the store check operation so that other instructions can be scheduled in between
-void MacroAssembler::store_check_part_1(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- shrptr(obj, CardTableModRefBS::card_shift);
-}
-
-void MacroAssembler::store_check_part_2(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
- // The calculation for byte_map_base is as follows:
- // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
- // So this essentially converts an address to a displacement, and
- // it will never need to be relocated. On 64bit, however, the value may be
- // too large for a 32bit displacement.
-
- intptr_t disp = (intptr_t) ct->byte_map_base;
- if (is_simm32(disp)) {
- Address cardtable(noreg, obj, Address::times_1, disp);
- movb(cardtable, 0);
- } else {
- // By doing it as an ExternalAddress, disp could be converted to a rip-relative
- // displacement and done in a single instruction given favorable mapping and
- // a smarter version of as_Address. Worst case it is two instructions, which
- // is no worse than loading disp into a register and using a simple
- // Address() as above.
- // We can't use ExternalAddress as the only style since if disp == 0 we'll
- // assert because NULL isn't acceptable in a relocInfo (see 6644928). In any
- // case we will sometimes get a single-instruction version.
-
- ExternalAddress cardtable((address)disp);
- Address index(noreg, obj, Address::times_1);
- movb(as_Address(ArrayAddress(cardtable, index)), 0);
- }
-}
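-
-// Editor's note: taken together, the two store-check halves dirty one card
-// table byte; as a sketch (byte_map_base and card_shift as described in the
-// comments above, 0 being the dirty value; jbyte modeled as signed char):
-#include <stdint.h>
-static inline void dirty_card_model(volatile signed char* byte_map_base,
-                                    uintptr_t obj_addr, int card_shift) {
-  byte_map_base[obj_addr >> card_shift] = 0;
-}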
-
-void MacroAssembler::subptr(Register dst, int32_t imm32) {
- LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
-}
-
-// Force generation of a 4 byte immediate value even if it fits into 8bit
-void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
- LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
-}
-
-void MacroAssembler::subptr(Register dst, Register src) {
- LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
-}
-
-// C++ bool manipulation
-void MacroAssembler::testbool(Register dst) {
- if(sizeof(bool) == 1)
- testb(dst, 0xff);
- else if(sizeof(bool) == 2) {
- // testw implementation needed for two byte bools
- ShouldNotReachHere();
- } else if(sizeof(bool) == 4)
- testl(dst, dst);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::testptr(Register dst, Register src) {
- LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
-}
-
-// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
-void MacroAssembler::tlab_allocate(Register obj,
- Register var_size_in_bytes,
- int con_size_in_bytes,
- Register t1,
- Register t2,
- Label& slow_case) {
- assert_different_registers(obj, t1, t2);
- assert_different_registers(obj, var_size_in_bytes, t1);
- Register end = t2;
- Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
-
- verify_tlab();
-
- NOT_LP64(get_thread(thread));
-
- movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
- if (var_size_in_bytes == noreg) {
- lea(end, Address(obj, con_size_in_bytes));
- } else {
- lea(end, Address(obj, var_size_in_bytes, Address::times_1));
- }
- cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
- jcc(Assembler::above, slow_case);
-
- // update the tlab top pointer
- movptr(Address(thread, JavaThread::tlab_top_offset()), end);
-
- // recover var_size_in_bytes if necessary
- if (var_size_in_bytes == end) {
- subptr(var_size_in_bytes, obj);
- }
- verify_tlab();
-}
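-
-// Editor's note: tlab_allocate is ordinary bump-pointer allocation against the
-// thread-local buffer; roughly (illustrative, not HotSpot code):
-#include <stddef.h>
-static inline void* tlab_allocate_model(char** tlab_top, char* tlab_end, size_t size) {
-  char* obj = *tlab_top;
-  char* end = obj + size;
-  if (end > tlab_end) return NULL;  // take the slow case
-  *tlab_top = end;                  // no cmpxchg needed: the buffer is thread-local
-  return obj;
-}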
-
-// Preserves rbx and rdx.
-Register MacroAssembler::tlab_refill(Label& retry,
- Label& try_eden,
- Label& slow_case) {
- Register top = rax;
- Register t1 = rcx;
- Register t2 = rsi;
- Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
- assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
- Label do_refill, discard_tlab;
-
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
- // No allocation in the shared eden.
- jmp(slow_case);
- }
-
- NOT_LP64(get_thread(thread_reg));
-
- movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
-
- // calculate amount of free space
- subptr(t1, top);
- shrptr(t1, LogHeapWordSize);
-
- // Retain tlab and allocate object in shared space if
- // the amount free in the tlab is too large to discard.
- cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
- jcc(Assembler::lessEqual, discard_tlab);
-
- // Retain
- // %%% yuck as movptr...
- movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
- addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
- if (TLABStats) {
- // increment number of slow_allocations
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
- }
- jmp(try_eden);
-
- bind(discard_tlab);
- if (TLABStats) {
- // increment number of refills
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
- // accumulate wastage -- t1 is amount free in tlab
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
- }
-
- // if tlab is currently allocated (top or end != null) then
- // fill [top, end + alignment_reserve) with array object
- testptr(top, top);
- jcc(Assembler::zero, do_refill);
-
- // set up the mark word
- movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
- // set the length to the remaining space
- subptr(t1, typeArrayOopDesc::header_size(T_INT));
- addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
- shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
- movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
- // set klass to intArrayKlass
- // dubious reloc why not an oop reloc?
- movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
- // store klass last: concurrent gcs assume the length is valid if the
- // klass field is not null.
- store_klass(top, t1);
-
- movptr(t1, top);
- subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
- incr_allocated_bytes(thread_reg, t1, 0);
-
- // refill the tlab with an eden allocation
- bind(do_refill);
- movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
- shlptr(t1, LogHeapWordSize);
- // allocate new tlab, address returned in top
- eden_allocate(top, t1, 0, t2, slow_case);
-
- // Check that t1 was preserved in eden_allocate.
-#ifdef ASSERT
- if (UseTLAB) {
- Label ok;
- Register tsize = rsi;
- assert_different_registers(tsize, thread_reg, t1);
- push(tsize);
- movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
- shlptr(tsize, LogHeapWordSize);
- cmpptr(t1, tsize);
- jcc(Assembler::equal, ok);
- STOP("assert(t1 != tlab size)");
- should_not_reach_here();
-
- bind(ok);
- pop(tsize);
- }
-#endif
- movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
- movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
- addptr(top, t1);
- subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
- movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
- verify_tlab();
- jmp(retry);
-
- return thread_reg; // for use by caller
-}
-
-void MacroAssembler::incr_allocated_bytes(Register thread,
- Register var_size_in_bytes,
- int con_size_in_bytes,
- Register t1) {
- if (!thread->is_valid()) {
-#ifdef _LP64
- thread = r15_thread;
-#else
- assert(t1->is_valid(), "need temp reg");
- thread = t1;
- get_thread(thread);
-#endif
- }
-
-#ifdef _LP64
- if (var_size_in_bytes->is_valid()) {
- addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
- } else {
- addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
- }
-#else
- if (var_size_in_bytes->is_valid()) {
- addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
- } else {
- addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
- }
- adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
-#endif
-}
-
-void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
- pusha();
-
- // if we are coming from c1, xmm registers may be live
- int off = 0;
- if (UseSSE == 1) {
- subptr(rsp, sizeof(jdouble)*8);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
- movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
- } else if (UseSSE >= 2) {
-#ifdef COMPILER2
- if (MaxVectorSize > 16) {
- assert(UseAVX > 0, "256bit vectors are supported only with AVX");
- // Save upper half of YMM registers
- subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
- vextractf128h(Address(rsp, 0),xmm0);
- vextractf128h(Address(rsp, 16),xmm1);
- vextractf128h(Address(rsp, 32),xmm2);
- vextractf128h(Address(rsp, 48),xmm3);
- vextractf128h(Address(rsp, 64),xmm4);
- vextractf128h(Address(rsp, 80),xmm5);
- vextractf128h(Address(rsp, 96),xmm6);
- vextractf128h(Address(rsp,112),xmm7);
-#ifdef _LP64
- vextractf128h(Address(rsp,128),xmm8);
- vextractf128h(Address(rsp,144),xmm9);
- vextractf128h(Address(rsp,160),xmm10);
- vextractf128h(Address(rsp,176),xmm11);
- vextractf128h(Address(rsp,192),xmm12);
- vextractf128h(Address(rsp,208),xmm13);
- vextractf128h(Address(rsp,224),xmm14);
- vextractf128h(Address(rsp,240),xmm15);
-#endif
- }
-#endif
- // Save whole 128bit (16 bytes) XMM registers
- subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
- movdqu(Address(rsp,off++*16),xmm0);
- movdqu(Address(rsp,off++*16),xmm1);
- movdqu(Address(rsp,off++*16),xmm2);
- movdqu(Address(rsp,off++*16),xmm3);
- movdqu(Address(rsp,off++*16),xmm4);
- movdqu(Address(rsp,off++*16),xmm5);
- movdqu(Address(rsp,off++*16),xmm6);
- movdqu(Address(rsp,off++*16),xmm7);
-#ifdef _LP64
- movdqu(Address(rsp,off++*16),xmm8);
- movdqu(Address(rsp,off++*16),xmm9);
- movdqu(Address(rsp,off++*16),xmm10);
- movdqu(Address(rsp,off++*16),xmm11);
- movdqu(Address(rsp,off++*16),xmm12);
- movdqu(Address(rsp,off++*16),xmm13);
- movdqu(Address(rsp,off++*16),xmm14);
- movdqu(Address(rsp,off++*16),xmm15);
-#endif
- }
-
- // Preserve registers across runtime call
- int incoming_argument_and_return_value_offset = -1;
- if (num_fpu_regs_in_use > 1) {
- // Must preserve all other FPU regs (could alternatively convert
- // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
- // FPU state, but we cannot trust the C compiler)
- NEEDS_CLEANUP;
- // NOTE that in this case we also push the incoming argument(s) to
- // the stack and restore it later; we also use this stack slot to
- // hold the return value from dsin, dcos etc.
- for (int i = 0; i < num_fpu_regs_in_use; i++) {
- subptr(rsp, sizeof(jdouble));
- fstp_d(Address(rsp, 0));
- }
- incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
- for (int i = nb_args-1; i >= 0; i--) {
- fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
- }
- }
-
- subptr(rsp, nb_args*sizeof(jdouble));
- for (int i = 0; i < nb_args; i++) {
- fstp_d(Address(rsp, i*sizeof(jdouble)));
- }
-
-#ifdef _LP64
- if (nb_args > 0) {
- movdbl(xmm0, Address(rsp, 0));
- }
- if (nb_args > 1) {
- movdbl(xmm1, Address(rsp, sizeof(jdouble)));
- }
- assert(nb_args <= 2, "unsupported number of args");
-#endif // _LP64
-
- // NOTE: we must not use call_VM_leaf here because that requires a
- // complete interpreter frame in debug mode -- same bug as 4387334
- // MacroAssembler::call_VM_leaf_base is perfectly safe and will
- // use the proper 64bit ABI
-
- NEEDS_CLEANUP;
- // Need to add stack banging before this runtime call if it needs to
- // be taken; however, there is no generic stack banging routine at
- // the MacroAssembler level
-
- MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
-
-#ifdef _LP64
- movsd(Address(rsp, 0), xmm0);
- fld_d(Address(rsp, 0));
-#endif // _LP64
- addptr(rsp, sizeof(jdouble) * nb_args);
- if (num_fpu_regs_in_use > 1) {
- // Must save return value to stack and then restore entire FPU
- // stack except incoming arguments
- fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
- for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
- fld_d(Address(rsp, 0));
- addptr(rsp, sizeof(jdouble));
- }
- fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
- addptr(rsp, sizeof(jdouble) * nb_args);
- }
-
- off = 0;
- if (UseSSE == 1) {
- movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
- movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
- addptr(rsp, sizeof(jdouble)*8);
- } else if (UseSSE >= 2) {
- // Restore whole 128bit (16 bytes) XMM registers
- movdqu(xmm0, Address(rsp,off++*16));
- movdqu(xmm1, Address(rsp,off++*16));
- movdqu(xmm2, Address(rsp,off++*16));
- movdqu(xmm3, Address(rsp,off++*16));
- movdqu(xmm4, Address(rsp,off++*16));
- movdqu(xmm5, Address(rsp,off++*16));
- movdqu(xmm6, Address(rsp,off++*16));
- movdqu(xmm7, Address(rsp,off++*16));
-#ifdef _LP64
- movdqu(xmm8, Address(rsp,off++*16));
- movdqu(xmm9, Address(rsp,off++*16));
- movdqu(xmm10, Address(rsp,off++*16));
- movdqu(xmm11, Address(rsp,off++*16));
- movdqu(xmm12, Address(rsp,off++*16));
- movdqu(xmm13, Address(rsp,off++*16));
- movdqu(xmm14, Address(rsp,off++*16));
- movdqu(xmm15, Address(rsp,off++*16));
-#endif
- addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
-#ifdef COMPILER2
- if (MaxVectorSize > 16) {
- // Restore upper half of YMM registers.
- vinsertf128h(xmm0, Address(rsp, 0));
- vinsertf128h(xmm1, Address(rsp, 16));
- vinsertf128h(xmm2, Address(rsp, 32));
- vinsertf128h(xmm3, Address(rsp, 48));
- vinsertf128h(xmm4, Address(rsp, 64));
- vinsertf128h(xmm5, Address(rsp, 80));
- vinsertf128h(xmm6, Address(rsp, 96));
- vinsertf128h(xmm7, Address(rsp,112));
-#ifdef _LP64
- vinsertf128h(xmm8, Address(rsp,128));
- vinsertf128h(xmm9, Address(rsp,144));
- vinsertf128h(xmm10, Address(rsp,160));
- vinsertf128h(xmm11, Address(rsp,176));
- vinsertf128h(xmm12, Address(rsp,192));
- vinsertf128h(xmm13, Address(rsp,208));
- vinsertf128h(xmm14, Address(rsp,224));
- vinsertf128h(xmm15, Address(rsp,240));
-#endif
- addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
- }
-#endif
- }
- popa();
-}
-
-static const double pi_4 = 0.7853981633974483;
-
-void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
- // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
- // was attempted in this code; unfortunately it appears that the
- // switch to 80-bit precision and back causes this to be
- // unprofitable compared with simply performing a runtime call if
- // the argument is out of the (-pi/4, pi/4) range.
-
- Register tmp = noreg;
- if (!VM_Version::supports_cmov()) {
- // fcmp needs a temporary, so preserve rbx
- tmp = rbx;
- push(tmp);
- }
-
- Label slow_case, done;
-
- ExternalAddress pi4_adr = (address)&pi_4;
- if (reachable(pi4_adr)) {
- // x ?<= pi/4
- fld_d(pi4_adr);
- fld_s(1); // Stack: X PI/4 X
- fabs(); // Stack: |X| PI/4 X
- fcmp(tmp);
- jcc(Assembler::above, slow_case);
-
- // fastest case: -pi/4 <= x <= pi/4
- switch(trig) {
- case 's':
- fsin();
- break;
- case 'c':
- fcos();
- break;
- case 't':
- ftan();
- break;
- default:
- assert(false, "bad intrinsic");
- break;
- }
- jmp(done);
- }
-
- // slow case: runtime call
- bind(slow_case);
-
- switch(trig) {
- case 's':
- {
- fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
- }
- break;
- case 'c':
- {
- fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
- }
- break;
- case 't':
- {
- fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
- }
- break;
- default:
- assert(false, "bad intrinsic");
- break;
- }
-
- // Come here with result in F-TOS
- bind(done);
-
- if (tmp != noreg) {
- pop(tmp);
- }
-}
-
-
-// Look up the method for a megamorphic invokeinterface call.
-// The target method is determined by <intf_klass, itable_index>.
-// The receiver klass is in recv_klass.
-// On success, the result will be in method_result, and execution falls through.
-// On failure, execution transfers to the given label.
-void MacroAssembler::lookup_interface_method(Register recv_klass,
- Register intf_klass,
- RegisterOrConstant itable_index,
- Register method_result,
- Register scan_temp,
- Label& L_no_such_interface) {
- assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
- assert(itable_index.is_constant() || itable_index.as_register() == method_result,
- "caller must use same register for non-constant itable index as for method");
-
- // Compute start of first itableOffsetEntry (which is at the end of the vtable)
- int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
- int itentry_off = itableMethodEntry::method_offset_in_bytes();
- int scan_step = itableOffsetEntry::size() * wordSize;
- int vte_size = vtableEntry::size() * wordSize;
- Address::ScaleFactor times_vte_scale = Address::times_ptr;
- assert(vte_size == wordSize, "else adjust times_vte_scale");
-
- movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
-
- // %%% Could store the aligned, prescaled offset in the klassoop.
- lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
- if (HeapWordsPerLong > 1) {
- // Round up to align_object_offset boundary
- // see code for InstanceKlass::start_of_itable!
- round_to(scan_temp, BytesPerLong);
- }
-
- // Adjust recv_klass by scaled itable_index, so we can free itable_index.
- assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
- lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
-
- // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
- // if (scan->interface() == intf) {
- // result = (klass + scan->offset() + itable_index);
- // }
- // }
- Label search, found_method;
-
- for (int peel = 1; peel >= 0; peel--) {
- movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
- cmpptr(intf_klass, method_result);
-
- if (peel) {
- jccb(Assembler::equal, found_method);
- } else {
- jccb(Assembler::notEqual, search);
- // (invert the test to fall through to found_method...)
- }
-
- if (!peel) break;
-
- bind(search);
-
- // Check that the previous entry is non-null. A null entry means that
- // the receiver class doesn't implement the interface, and wasn't the
- // same as when the caller was compiled.
- testptr(method_result, method_result);
- jcc(Assembler::zero, L_no_such_interface);
- addptr(scan_temp, scan_step);
- }
-
- bind(found_method);
-
- // Got a hit.
- movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
- movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
-}
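The pseudo-code comment inside lookup_interface_method() is the whole algorithm; written out as plain C++ it is roughly the following (the ItableOffsetEntry layout here is a simplified assumption, not the real itableOffsetEntry class):

#include <cstddef>

struct ItableOffsetEntry { void* interface; int offset; };   // simplified layout

static void* lookup_interface(char* recv_klass, ItableOffsetEntry* itable_start,
                              void* intf_klass, int itable_index_bytes) {
  for (ItableOffsetEntry* scan = itable_start; ; scan++) {
    if (scan->interface == NULL) return NULL;    // L_no_such_interface
    if (scan->interface == intf_klass) {
      // Hit: method_result = *(recv_klass + scan->offset + itable_index)
      return *(void**)(recv_klass + scan->offset + itable_index_bytes);
    }
  }
}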
-
-
-// virtual method calling
-void MacroAssembler::lookup_virtual_method(Register recv_klass,
- RegisterOrConstant vtable_index,
- Register method_result) {
- const int base = InstanceKlass::vtable_start_offset() * wordSize;
- assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
- Address vtable_entry_addr(recv_klass,
- vtable_index, Address::times_ptr,
- base + vtableEntry::method_offset_in_bytes());
- movptr(method_result, vtable_entry_addr);
-}
-
-
-void MacroAssembler::check_klass_subtype(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Label& L_success) {
- Label L_failure;
- check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
- check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
- bind(L_failure);
-}
-
-
-void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Label* L_success,
- Label* L_failure,
- Label* L_slow_path,
- RegisterOrConstant super_check_offset) {
- assert_different_registers(sub_klass, super_klass, temp_reg);
- bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
- if (super_check_offset.is_register()) {
- assert_different_registers(sub_klass, super_klass,
- super_check_offset.as_register());
- } else if (must_load_sco) {
- assert(temp_reg != noreg, "supply either a temp or a register offset");
- }
-
- Label L_fallthrough;
- int label_nulls = 0;
- if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
- if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
- if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
- assert(label_nulls <= 1, "at most one NULL in the batch");
-
- int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
- int sco_offset = in_bytes(Klass::super_check_offset_offset());
- Address super_check_offset_addr(super_klass, sco_offset);
-
- // Hacked jcc, which "knows" that L_fallthrough, at least, is in
- // range of a jccb. If this routine grows larger, reconsider at
- // least some of these.
-#define local_jcc(assembler_cond, label) \
- if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
- else jcc( assembler_cond, label) /*omit semi*/
-
- // Hacked jmp, which may only be used just before L_fallthrough.
-#define final_jmp(label) \
- if (&(label) == &L_fallthrough) { /*do nothing*/ } \
- else jmp(label) /*omit semi*/
-
- // If the pointers are equal, we are done (e.g., String[] elements).
- // This self-check enables sharing of secondary supertype arrays among
- // non-primary types such as array-of-interface. Otherwise, each such
- // type would need its own customized SSA.
- // We move this check to the front of the fast path because many
- // type checks are in fact trivially successful in this manner,
- // so we get a nicely predicted branch right at the start of the check.
- cmpptr(sub_klass, super_klass);
- local_jcc(Assembler::equal, *L_success);
-
- // Check the supertype display:
- if (must_load_sco) {
- // Positive movl does right thing on LP64.
- movl(temp_reg, super_check_offset_addr);
- super_check_offset = RegisterOrConstant(temp_reg);
- }
- Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
- cmpptr(super_klass, super_check_addr); // load displayed supertype
-
- // This check has worked decisively for primary supers.
- // Secondary supers are sought in the super_cache ('super_cache_addr').
- // (Secondary supers are interfaces and very deeply nested subtypes.)
- // This works in the same check above because of a tricky aliasing
- // between the super_cache and the primary super display elements.
- // (The 'super_check_addr' can address either, as the case requires.)
- // Note that the cache is updated below if it does not help us find
- // what we need immediately.
- // So if it was a primary super, we can just fail immediately.
- // Otherwise, it's the slow path for us (no success at this point).
-
- if (super_check_offset.is_register()) {
- local_jcc(Assembler::equal, *L_success);
- cmpl(super_check_offset.as_register(), sc_offset);
- if (L_failure == &L_fallthrough) {
- local_jcc(Assembler::equal, *L_slow_path);
- } else {
- local_jcc(Assembler::notEqual, *L_failure);
- final_jmp(*L_slow_path);
- }
- } else if (super_check_offset.as_constant() == sc_offset) {
- // Need a slow path; fast failure is impossible.
- if (L_slow_path == &L_fallthrough) {
- local_jcc(Assembler::equal, *L_success);
- } else {
- local_jcc(Assembler::notEqual, *L_slow_path);
- final_jmp(*L_success);
- }
- } else {
- // No slow path; it's a fast decision.
- if (L_failure == &L_fallthrough) {
- local_jcc(Assembler::equal, *L_success);
- } else {
- local_jcc(Assembler::notEqual, *L_failure);
- final_jmp(*L_success);
- }
- }
-
- bind(L_fallthrough);
-
-#undef local_jcc
-#undef final_jmp
-}
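As a reading aid, the decision encoded by the fast path can be written as a small scalar function; the offsets are parameters here, standing in for Klass::super_check_offset_offset() and Klass::secondary_super_cache_offset(). It returns +1 for success, -1 for failure, and 0 for "take the slow path":

static int subtype_fast_path(char* sub_klass, char* super_klass,
                             int super_check_offset, int sc_offset) {
  if (sub_klass == super_klass) return 1;              // trivial self check
  // The supertype "display": super_klass records where to look inside sub_klass.
  char* displayed = *(char**)(sub_klass + super_check_offset);
  if (displayed == super_klass) return 1;              // primary super, or cache hit
  if (super_check_offset != sc_offset) return -1;      // a primary-display miss is final
  return 0;                                            // secondary supers: slow path
}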
-
-
-void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Label* L_success,
- Label* L_failure,
- bool set_cond_codes) {
- assert_different_registers(sub_klass, super_klass, temp_reg);
- if (temp2_reg != noreg)
- assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
-#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
-
- Label L_fallthrough;
- int label_nulls = 0;
- if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
- if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
- assert(label_nulls <= 1, "at most one NULL in the batch");
-
- // a couple of useful fields in sub_klass:
- int ss_offset = in_bytes(Klass::secondary_supers_offset());
- int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
- Address secondary_supers_addr(sub_klass, ss_offset);
- Address super_cache_addr( sub_klass, sc_offset);
-
- // Do a linear scan of the secondary super-klass chain.
- // This code is rarely used, so simplicity is a virtue here.
- // The repne_scan instruction uses fixed registers, which we must spill.
- // Don't worry too much about pre-existing connections with the input regs.
-
- assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
- assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
-
- // Get super_klass value into rax (even if it was in rdi or rcx).
- bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
- if (super_klass != rax || UseCompressedOops) {
- if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
- mov(rax, super_klass);
- }
- if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
- if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
-
-#ifndef PRODUCT
- int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
- ExternalAddress pst_counter_addr((address) pst_counter);
- NOT_LP64( incrementl(pst_counter_addr) );
- LP64_ONLY( lea(rcx, pst_counter_addr) );
- LP64_ONLY( incrementl(Address(rcx, 0)) );
-#endif //PRODUCT
-
- // We will consult the secondary-super array.
- movptr(rdi, secondary_supers_addr);
- // Load the array length. (Positive movl does right thing on LP64.)
- movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
- // Skip to start of data.
- addptr(rdi, Array<Klass*>::base_offset_in_bytes());
-
- // Scan RCX words at [RDI] for an occurrence of RAX.
- // Set NZ/Z based on last compare.
- // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
- // not change flags (only scas instruction which is repeated sets flags).
- // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
-
- testptr(rax,rax); // Set Z = 0
- repne_scan();
-
- // Unspill the temp. registers:
- if (pushed_rdi) pop(rdi);
- if (pushed_rcx) pop(rcx);
- if (pushed_rax) pop(rax);
-
- if (set_cond_codes) {
- // Special hack for the AD files: rdi is guaranteed non-zero.
- assert(!pushed_rdi, "rdi must be left non-NULL");
- // Also, the condition codes are properly set Z/NZ on succeed/failure.
- }
-
- if (L_failure == &L_fallthrough)
- jccb(Assembler::notEqual, *L_failure);
- else jcc(Assembler::notEqual, *L_failure);
-
- // Success. Cache the super we found and proceed in triumph.
- movptr(super_cache_addr, super_klass);
-
- if (L_success != &L_fallthrough) {
- jmp(*L_success);
- }
-
-#undef IS_A_TEMP
-
- bind(L_fallthrough);
-}
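The repne_scan() above is a hardware-assisted linear search; in scalar form the slow path is roughly the following (array layout simplified, Array<Klass*> header accessors omitted):

static bool subtype_slow_path(char** secondary_supers, int length,
                              char* super_klass, char** secondary_super_cache) {
  for (int i = 0; i < length; i++) {
    if (secondary_supers[i] == super_klass) {
      *secondary_super_cache = super_klass;   // so the next fast-path probe hits
      return true;
    }
  }
  return false;
}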
-
-
-void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
- if (VM_Version::supports_cmov()) {
- cmovl(cc, dst, src);
- } else {
- Label L;
- jccb(negate_condition(cc), L);
- movl(dst, src);
- bind(L);
- }
-}
-
-void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
- if (VM_Version::supports_cmov()) {
- cmovl(cc, dst, src);
- } else {
- Label L;
- jccb(negate_condition(cc), L);
- movl(dst, src);
- bind(L);
- }
-}
-
-void MacroAssembler::verify_oop(Register reg, const char* s) {
- if (!VerifyOops) return;
-
- // Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop: %s: %s", reg->name(), s);
- BLOCK_COMMENT("verify_oop {");
-#ifdef _LP64
- push(rscratch1); // save r10, trashed by movptr()
-#endif
- push(rax); // save rax,
- push(reg); // pass register argument
- ExternalAddress buffer((address) b);
- // avoid using pushptr, as it modifies scratch registers
- // and our contract is not to modify anything
- movptr(rax, buffer.addr());
- push(rax);
- // call indirectly to solve generation ordering problem
- movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
- call(rax);
- // Caller pops the arguments (oop, message) and restores rax, r10
- BLOCK_COMMENT("} verify_oop");
-}
-
-
-RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
- Register tmp,
- int offset) {
- intptr_t value = *delayed_value_addr;
- if (value != 0)
- return RegisterOrConstant(value + offset);
-
- // load indirectly to solve generation ordering problem
- movptr(tmp, ExternalAddress((address) delayed_value_addr));
-
-#ifdef ASSERT
- { Label L;
- testptr(tmp, tmp);
- if (WizardMode) {
- jcc(Assembler::notZero, L);
- char* buf = new char[40];
- sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
- STOP(buf);
- } else {
- jccb(Assembler::notZero, L);
- hlt();
- }
- bind(L);
- }
-#endif
-
- if (offset != 0)
- addptr(tmp, offset);
-
- return RegisterOrConstant(tmp);
-}
-
-
-Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
- int extra_slot_offset) {
- // cf. TemplateTable::prepare_invoke(), if (load_receiver).
- int stackElementSize = Interpreter::stackElementSize;
- int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
-#ifdef ASSERT
- int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
- assert(offset1 - offset == stackElementSize, "correct arithmetic");
-#endif
- Register scale_reg = noreg;
- Address::ScaleFactor scale_factor = Address::no_scale;
- if (arg_slot.is_constant()) {
- offset += arg_slot.as_constant() * stackElementSize;
- } else {
- scale_reg = arg_slot.as_register();
- scale_factor = Address::times(stackElementSize);
- }
- offset += wordSize; // return PC is on stack
- return Address(rsp, scale_reg, scale_factor, offset);
-}
-
-
-void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
- if (!VerifyOops) return;
-
- // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
- // Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop_addr: %s", s);
-
-#ifdef _LP64
- push(rscratch1); // save r10, trashed by movptr()
-#endif
- push(rax); // save rax,
- // addr may contain rsp so we will have to adjust it based on the push
- // we just did (and on 64 bit we do two pushes)
-  // NOTE: the 64-bit code seemed to have had a bug here: it did movq(addr, rax),
-  // which stores rax into addr, the reverse of what was intended.
- if (addr.uses(rsp)) {
- lea(rax, addr);
- pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
- } else {
- pushptr(addr);
- }
-
- ExternalAddress buffer((address) b);
- // pass msg argument
- // avoid using pushptr, as it modifies scratch registers
- // and our contract is not to modify anything
- movptr(rax, buffer.addr());
- push(rax);
-
- // call indirectly to solve generation ordering problem
- movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
- call(rax);
- // Caller pops the arguments (addr, message) and restores rax, r10.
-}
-
-void MacroAssembler::verify_tlab() {
-#ifdef ASSERT
- if (UseTLAB && VerifyOops) {
- Label next, ok;
- Register t1 = rsi;
- Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
-
- push(t1);
- NOT_LP64(push(thread_reg));
- NOT_LP64(get_thread(thread_reg));
-
- movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
- jcc(Assembler::aboveEqual, next);
- STOP("assert(top >= start)");
- should_not_reach_here();
-
- bind(next);
- movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
- cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- jcc(Assembler::aboveEqual, ok);
- STOP("assert(top <= end)");
- should_not_reach_here();
-
- bind(ok);
- NOT_LP64(pop(thread_reg));
- pop(t1);
- }
-#endif
-}
-
-class ControlWord {
- public:
- int32_t _value;
-
- int rounding_control() const { return (_value >> 10) & 3 ; }
- int precision_control() const { return (_value >> 8) & 3 ; }
- bool precision() const { return ((_value >> 5) & 1) != 0; }
- bool underflow() const { return ((_value >> 4) & 1) != 0; }
- bool overflow() const { return ((_value >> 3) & 1) != 0; }
- bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
- bool denormalized() const { return ((_value >> 1) & 1) != 0; }
- bool invalid() const { return ((_value >> 0) & 1) != 0; }
-
- void print() const {
- // rounding control
- const char* rc;
- switch (rounding_control()) {
- case 0: rc = "round near"; break;
- case 1: rc = "round down"; break;
- case 2: rc = "round up "; break;
- case 3: rc = "chop "; break;
- };
- // precision control
- const char* pc;
- switch (precision_control()) {
- case 0: pc = "24 bits "; break;
- case 1: pc = "reserved"; break;
- case 2: pc = "53 bits "; break;
- case 3: pc = "64 bits "; break;
- };
- // flags
- char f[9];
- f[0] = ' ';
- f[1] = ' ';
- f[2] = (precision ()) ? 'P' : 'p';
- f[3] = (underflow ()) ? 'U' : 'u';
- f[4] = (overflow ()) ? 'O' : 'o';
- f[5] = (zero_divide ()) ? 'Z' : 'z';
- f[6] = (denormalized()) ? 'D' : 'd';
- f[7] = (invalid ()) ? 'I' : 'i';
- f[8] = '\x0';
- // output
- printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
- }
-
-};
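The shifts in print() follow the standard x87 control-word layout. A quick standalone sanity check with 0x027F, a commonly used "round to nearest, 53-bit precision, all exceptions masked" value (the constant is an assumption for illustration, not taken from this file):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t cw = 0x027F;
  int rc = (cw >> 10) & 3;                        // 0 -> "round near"
  int pc = (cw >> 8)  & 3;                        // 2 -> "53 bits"
  std::printf("rc=%d pc=%d masks=%02x\n", rc, pc, cw & 0x3F);   // rc=0 pc=2 masks=3f
  return 0;
}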
-
-class StatusWord {
- public:
- int32_t _value;
-
- bool busy() const { return ((_value >> 15) & 1) != 0; }
- bool C3() const { return ((_value >> 14) & 1) != 0; }
- bool C2() const { return ((_value >> 10) & 1) != 0; }
- bool C1() const { return ((_value >> 9) & 1) != 0; }
- bool C0() const { return ((_value >> 8) & 1) != 0; }
- int top() const { return (_value >> 11) & 7 ; }
- bool error_status() const { return ((_value >> 7) & 1) != 0; }
- bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
- bool precision() const { return ((_value >> 5) & 1) != 0; }
- bool underflow() const { return ((_value >> 4) & 1) != 0; }
- bool overflow() const { return ((_value >> 3) & 1) != 0; }
- bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
- bool denormalized() const { return ((_value >> 1) & 1) != 0; }
- bool invalid() const { return ((_value >> 0) & 1) != 0; }
-
- void print() const {
- // condition codes
- char c[5];
- c[0] = (C3()) ? '3' : '-';
- c[1] = (C2()) ? '2' : '-';
- c[2] = (C1()) ? '1' : '-';
- c[3] = (C0()) ? '0' : '-';
- c[4] = '\x0';
- // flags
- char f[9];
- f[0] = (error_status()) ? 'E' : '-';
- f[1] = (stack_fault ()) ? 'S' : '-';
- f[2] = (precision ()) ? 'P' : '-';
- f[3] = (underflow ()) ? 'U' : '-';
- f[4] = (overflow ()) ? 'O' : '-';
- f[5] = (zero_divide ()) ? 'Z' : '-';
- f[6] = (denormalized()) ? 'D' : '-';
- f[7] = (invalid ()) ? 'I' : '-';
- f[8] = '\x0';
- // output
- printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
- }
-
-};
-
-class TagWord {
- public:
- int32_t _value;
-
- int tag_at(int i) const { return (_value >> (i*2)) & 3; }
-
- void print() const {
- printf("%04x", _value & 0xFFFF);
- }
-
-};
-
-class FPU_Register {
- public:
- int32_t _m0;
- int32_t _m1;
- int16_t _ex;
-
- bool is_indefinite() const {
- return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
- }
-
- void print() const {
- char sign = (_ex < 0) ? '-' : '+';
- const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
- printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
- };
-
-};
-
-class FPU_State {
- public:
- enum {
- register_size = 10,
- number_of_registers = 8,
- register_mask = 7
- };
-
- ControlWord _control_word;
- StatusWord _status_word;
- TagWord _tag_word;
- int32_t _error_offset;
- int32_t _error_selector;
- int32_t _data_offset;
- int32_t _data_selector;
- int8_t _register[register_size * number_of_registers];
-
- int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
- FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
-
- const char* tag_as_string(int tag) const {
- switch (tag) {
- case 0: return "valid";
- case 1: return "zero";
- case 2: return "special";
- case 3: return "empty";
- }
- ShouldNotReachHere();
- return NULL;
- }
-
- void print() const {
- // print computation registers
- { int t = _status_word.top();
- for (int i = 0; i < number_of_registers; i++) {
- int j = (i - t) & register_mask;
- printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
- st(j)->print();
- printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
- }
- }
- printf("\n");
- // print control registers
- printf("ctrl = "); _control_word.print(); printf("\n");
- printf("stat = "); _status_word .print(); printf("\n");
- printf("tags = "); _tag_word .print(); printf("\n");
- }
-
-};
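tag_for_st() maps the logical slot ST(i) to a physical register by adding the status word's TOP field modulo 8; for example, with top() == 5, ST(0) is physical register 5 and ST(3) wraps to register 0:

static int st_to_physical(int top, int i) { return (top + i) & 7; }   // register_mask == 7
// st_to_physical(5, 0) == 5, st_to_physical(5, 3) == 0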
-
-class Flag_Register {
- public:
- int32_t _value;
-
- bool overflow() const { return ((_value >> 11) & 1) != 0; }
- bool direction() const { return ((_value >> 10) & 1) != 0; }
- bool sign() const { return ((_value >> 7) & 1) != 0; }
- bool zero() const { return ((_value >> 6) & 1) != 0; }
- bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
- bool parity() const { return ((_value >> 2) & 1) != 0; }
- bool carry() const { return ((_value >> 0) & 1) != 0; }
-
- void print() const {
- // flags
- char f[8];
- f[0] = (overflow ()) ? 'O' : '-';
- f[1] = (direction ()) ? 'D' : '-';
- f[2] = (sign ()) ? 'S' : '-';
- f[3] = (zero ()) ? 'Z' : '-';
- f[4] = (auxiliary_carry()) ? 'A' : '-';
- f[5] = (parity ()) ? 'P' : '-';
- f[6] = (carry ()) ? 'C' : '-';
- f[7] = '\x0';
- // output
- printf("%08x flags = %s", _value, f);
- }
-
-};
-
-class IU_Register {
- public:
- int32_t _value;
-
- void print() const {
- printf("%08x %11d", _value, _value);
- }
-
-};
-
-class IU_State {
- public:
- Flag_Register _eflags;
- IU_Register _rdi;
- IU_Register _rsi;
- IU_Register _rbp;
- IU_Register _rsp;
- IU_Register _rbx;
- IU_Register _rdx;
- IU_Register _rcx;
- IU_Register _rax;
-
- void print() const {
- // computation registers
- printf("rax, = "); _rax.print(); printf("\n");
- printf("rbx, = "); _rbx.print(); printf("\n");
- printf("rcx = "); _rcx.print(); printf("\n");
- printf("rdx = "); _rdx.print(); printf("\n");
- printf("rdi = "); _rdi.print(); printf("\n");
- printf("rsi = "); _rsi.print(); printf("\n");
- printf("rbp, = "); _rbp.print(); printf("\n");
- printf("rsp = "); _rsp.print(); printf("\n");
- printf("\n");
- // control registers
- printf("flgs = "); _eflags.print(); printf("\n");
- }
-};
-
-
-class CPU_State {
- public:
- FPU_State _fpu_state;
- IU_State _iu_state;
-
- void print() const {
- printf("--------------------------------------------------\n");
- _iu_state .print();
- printf("\n");
- _fpu_state.print();
- printf("--------------------------------------------------\n");
- }
-
-};
-
-
-static void _print_CPU_state(CPU_State* state) {
- state->print();
-};
-
-
-void MacroAssembler::print_CPU_state() {
- push_CPU_state();
- push(rsp); // pass CPU state
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
- addptr(rsp, wordSize); // discard argument
- pop_CPU_state();
-}
-
-
-static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
- static int counter = 0;
- FPU_State* fs = &state->_fpu_state;
- counter++;
- // For leaf calls, only verify that the top few elements remain empty.
- // We only need 1 empty at the top for C2 code.
- if( stack_depth < 0 ) {
- if( fs->tag_for_st(7) != 3 ) {
- printf("FPR7 not empty\n");
- state->print();
- assert(false, "error");
- return false;
- }
- return true; // All other stack states do not matter
- }
-
- assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
- "bad FPU control word");
-
- // compute stack depth
- int i = 0;
- while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
- int d = i;
- while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
- // verify findings
- if (i != FPU_State::number_of_registers) {
- // stack not contiguous
- printf("%s: stack not contiguous at ST%d\n", s, i);
- state->print();
- assert(false, "error");
- return false;
- }
- // check if computed stack depth corresponds to expected stack depth
- if (stack_depth < 0) {
- // expected stack depth is -stack_depth or less
- if (d > -stack_depth) {
- // too many elements on the stack
- printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
- state->print();
- assert(false, "error");
- return false;
- }
- } else {
- // expected stack depth is stack_depth
- if (d != stack_depth) {
- // wrong stack depth
- printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
- state->print();
- assert(false, "error");
- return false;
- }
- }
- // everything is cool
- return true;
-}
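The stack-contiguity scan in the middle of _verify_FPU() is the part worth internalizing; over the eight tag values it amounts to:

static int fpu_stack_depth_or_minus1(const int tags[8]) {   // tags[i] == tag_for_st(i)
  int i = 0;
  while (i < 8 && tags[i] < 3) i++;     // leading non-empty slots ("valid"/"zero"/"special")
  int depth = i;
  while (i < 8 && tags[i] == 3) i++;    // everything after that must be "empty"
  return (i == 8) ? depth : -1;         // -1 means "stack not contiguous"
}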
-
-
-void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
- if (!VerifyFPU) return;
- push_CPU_state();
- push(rsp); // pass CPU state
- ExternalAddress msg((address) s);
- // pass message string s
- pushptr(msg.addr());
- push(stack_depth); // pass stack depth
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
- addptr(rsp, 3 * wordSize); // discard arguments
- // check for error
- { Label L;
- testl(rax, rax);
- jcc(Assembler::notZero, L);
- int3(); // break if error condition
- bind(L);
- }
- pop_CPU_state();
-}
-
-void MacroAssembler::load_klass(Register dst, Register src) {
-#ifdef _LP64
- if (UseCompressedKlassPointers) {
- movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- decode_klass_not_null(dst);
- } else
-#endif
- movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-}
-
-void MacroAssembler::load_prototype_header(Register dst, Register src) {
-#ifdef _LP64
- if (UseCompressedKlassPointers) {
- assert (Universe::heap() != NULL, "java heap should be initialized");
- movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- if (Universe::narrow_klass_shift() != 0) {
- assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
- movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
- } else {
- movq(dst, Address(dst, Klass::prototype_header_offset()));
- }
- } else
-#endif
- {
- movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- movptr(dst, Address(dst, Klass::prototype_header_offset()));
- }
-}
-
-void MacroAssembler::store_klass(Register dst, Register src) {
-#ifdef _LP64
- if (UseCompressedKlassPointers) {
- encode_klass_not_null(src);
- movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
- } else
-#endif
- movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
-}
-
-void MacroAssembler::load_heap_oop(Register dst, Address src) {
-#ifdef _LP64
- // FIXME: Must change all places where we try to load the klass.
- if (UseCompressedOops) {
- movl(dst, src);
- decode_heap_oop(dst);
- } else
-#endif
- movptr(dst, src);
-}
-
-// Doesn't do verification, generates fixed size code
-void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
-#ifdef _LP64
- if (UseCompressedOops) {
- movl(dst, src);
- decode_heap_oop_not_null(dst);
- } else
-#endif
- movptr(dst, src);
-}
-
-void MacroAssembler::store_heap_oop(Address dst, Register src) {
-#ifdef _LP64
- if (UseCompressedOops) {
- assert(!dst.uses(src), "not enough registers");
- encode_heap_oop(src);
- movl(dst, src);
- } else
-#endif
- movptr(dst, src);
-}
-
-void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
- assert_different_registers(src1, tmp);
-#ifdef _LP64
- if (UseCompressedOops) {
- bool did_push = false;
- if (tmp == noreg) {
- tmp = rax;
- push(tmp);
- did_push = true;
- assert(!src2.uses(rsp), "can't push");
- }
- load_heap_oop(tmp, src2);
- cmpptr(src1, tmp);
- if (did_push) pop(tmp);
- } else
-#endif
- cmpptr(src1, src2);
-}
-
-// Used for storing NULLs.
-void MacroAssembler::store_heap_oop_null(Address dst) {
-#ifdef _LP64
- if (UseCompressedOops) {
- movl(dst, (int32_t)NULL_WORD);
- } else {
- movslq(dst, (int32_t)NULL_WORD);
- }
-#else
- movl(dst, (int32_t)NULL_WORD);
-#endif
-}
-
-#ifdef _LP64
-void MacroAssembler::store_klass_gap(Register dst, Register src) {
- if (UseCompressedKlassPointers) {
- // Store to klass gap in destination
- movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
- }
-}
-
-#ifdef ASSERT
-void MacroAssembler::verify_heapbase(const char* msg) {
- assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- if (CheckCompressedOops) {
- Label ok;
- push(rscratch1); // cmpptr trashes rscratch1
- cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
- jcc(Assembler::equal, ok);
- STOP(msg);
- bind(ok);
- pop(rscratch1);
- }
-}
-#endif
-
-// Algorithm must match oop.inline.hpp encode_heap_oop.
-void MacroAssembler::encode_heap_oop(Register r) {
-#ifdef ASSERT
- verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
-#endif
- verify_oop(r, "broken oop in encode_heap_oop");
- if (Universe::narrow_oop_base() == NULL) {
- if (Universe::narrow_oop_shift() != 0) {
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- shrq(r, LogMinObjAlignmentInBytes);
- }
- return;
- }
- testq(r, r);
- cmovq(Assembler::equal, r, r12_heapbase);
- subq(r, r12_heapbase);
- shrq(r, LogMinObjAlignmentInBytes);
-}
-
-void MacroAssembler::encode_heap_oop_not_null(Register r) {
-#ifdef ASSERT
- verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
- if (CheckCompressedOops) {
- Label ok;
- testq(r, r);
- jcc(Assembler::notEqual, ok);
- STOP("null oop passed to encode_heap_oop_not_null");
- bind(ok);
- }
-#endif
- verify_oop(r, "broken oop in encode_heap_oop_not_null");
- if (Universe::narrow_oop_base() != NULL) {
- subq(r, r12_heapbase);
- }
- if (Universe::narrow_oop_shift() != 0) {
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- shrq(r, LogMinObjAlignmentInBytes);
- }
-}
-
-void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
-#ifdef ASSERT
- verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
- if (CheckCompressedOops) {
- Label ok;
- testq(src, src);
- jcc(Assembler::notEqual, ok);
- STOP("null oop passed to encode_heap_oop_not_null2");
- bind(ok);
- }
-#endif
- verify_oop(src, "broken oop in encode_heap_oop_not_null2");
- if (dst != src) {
- movq(dst, src);
- }
- if (Universe::narrow_oop_base() != NULL) {
- subq(dst, r12_heapbase);
- }
- if (Universe::narrow_oop_shift() != 0) {
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- shrq(dst, LogMinObjAlignmentInBytes);
- }
-}
-
-void MacroAssembler::decode_heap_oop(Register r) {
-#ifdef ASSERT
- verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
-#endif
- if (Universe::narrow_oop_base() == NULL) {
- if (Universe::narrow_oop_shift() != 0) {
- assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- shlq(r, LogMinObjAlignmentInBytes);
- }
- } else {
- Label done;
- shlq(r, LogMinObjAlignmentInBytes);
- jccb(Assembler::equal, done);
- addq(r, r12_heapbase);
- bind(done);
- }
- verify_oop(r, "broken oop in decode_heap_oop");
-}
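A scalar model of the encode/decode pair above, for the configuration with a non-NULL narrow-oop base and a non-zero shift; NULL maps to zero in both directions, which is what the cmovq in encode_heap_oop() and the conditional addq in decode_heap_oop() arrange:

#include <cstdint>

static uint32_t encode_oop(uintptr_t oop, uintptr_t heap_base, int shift) {
  if (oop == 0) return 0;                        // the cmovq(equal, r, r12_heapbase) path
  return (uint32_t)((oop - heap_base) >> shift);
}

static uintptr_t decode_oop(uint32_t narrow, uintptr_t heap_base, int shift) {
  if (narrow == 0) return 0;                     // the jccb(equal, done) above
  return heap_base + ((uintptr_t)narrow << shift);
}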
-
-void MacroAssembler::decode_heap_oop_not_null(Register r) {
- // Note: it will change flags
- assert (UseCompressedOops, "should only be used for compressed headers");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- // Cannot assert, unverified entry point counts instructions (see .ad file)
- // vtableStubs also counts instructions in pd_code_size_limit.
- // Also do not verify_oop as this is called by verify_oop.
- if (Universe::narrow_oop_shift() != 0) {
- assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- shlq(r, LogMinObjAlignmentInBytes);
- if (Universe::narrow_oop_base() != NULL) {
- addq(r, r12_heapbase);
- }
- } else {
- assert (Universe::narrow_oop_base() == NULL, "sanity");
- }
-}
-
-void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
- // Note: it will change flags
- assert (UseCompressedOops, "should only be used for compressed headers");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- // Cannot assert, unverified entry point counts instructions (see .ad file)
- // vtableStubs also counts instructions in pd_code_size_limit.
- // Also do not verify_oop as this is called by verify_oop.
- if (Universe::narrow_oop_shift() != 0) {
- assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
- if (LogMinObjAlignmentInBytes == Address::times_8) {
- leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
- } else {
- if (dst != src) {
- movq(dst, src);
- }
- shlq(dst, LogMinObjAlignmentInBytes);
- if (Universe::narrow_oop_base() != NULL) {
- addq(dst, r12_heapbase);
- }
- }
- } else {
- assert (Universe::narrow_oop_base() == NULL, "sanity");
- if (dst != src) {
- movq(dst, src);
- }
- }
-}
-
-void MacroAssembler::encode_klass_not_null(Register r) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
- verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
-#endif
- if (Universe::narrow_klass_base() != NULL) {
- subq(r, r12_heapbase);
- }
- if (Universe::narrow_klass_shift() != 0) {
- assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- shrq(r, LogKlassAlignmentInBytes);
- }
-}
-
-void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
- verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
-#endif
- if (dst != src) {
- movq(dst, src);
- }
- if (Universe::narrow_klass_base() != NULL) {
- subq(dst, r12_heapbase);
- }
- if (Universe::narrow_klass_shift() != 0) {
- assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- shrq(dst, LogKlassAlignmentInBytes);
- }
-}
-
-void MacroAssembler::decode_klass_not_null(Register r) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
- // Note: it will change flags
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
- // Cannot assert, unverified entry point counts instructions (see .ad file)
- // vtableStubs also counts instructions in pd_code_size_limit.
- // Also do not verify_oop as this is called by verify_oop.
- if (Universe::narrow_klass_shift() != 0) {
- assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- shlq(r, LogKlassAlignmentInBytes);
- if (Universe::narrow_klass_base() != NULL) {
- addq(r, r12_heapbase);
- }
- } else {
- assert (Universe::narrow_klass_base() == NULL, "sanity");
- }
-}
-
-void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
- assert(Metaspace::is_initialized(), "metaspace should be initialized");
- // Note: it will change flags
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
- // Cannot assert, unverified entry point counts instructions (see .ad file)
- // vtableStubs also counts instructions in pd_code_size_limit.
- // Also do not verify_oop as this is called by verify_oop.
- if (Universe::narrow_klass_shift() != 0) {
- assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
- assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
- leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
- } else {
- assert (Universe::narrow_klass_base() == NULL, "sanity");
- if (dst != src) {
- movq(dst, src);
- }
- }
-}
-
-void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
- assert (UseCompressedOops, "should only be used for compressed headers");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int oop_index = oop_recorder()->find_index(obj);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- mov_narrow_oop(dst, oop_index, rspec);
-}
-
-void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
- assert (UseCompressedOops, "should only be used for compressed headers");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int oop_index = oop_recorder()->find_index(obj);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- mov_narrow_oop(dst, oop_index, rspec);
-}
-
-void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int klass_index = oop_recorder()->find_index(k);
- RelocationHolder rspec = metadata_Relocation::spec(klass_index);
- mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
-}
-
-void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int klass_index = oop_recorder()->find_index(k);
- RelocationHolder rspec = metadata_Relocation::spec(klass_index);
- mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
-}
-
-void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
- assert (UseCompressedOops, "should only be used for compressed headers");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int oop_index = oop_recorder()->find_index(obj);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- Assembler::cmp_narrow_oop(dst, oop_index, rspec);
-}
-
-void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
- assert (UseCompressedOops, "should only be used for compressed headers");
- assert (Universe::heap() != NULL, "java heap should be initialized");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int oop_index = oop_recorder()->find_index(obj);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- Assembler::cmp_narrow_oop(dst, oop_index, rspec);
-}
-
-void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int klass_index = oop_recorder()->find_index(k);
- RelocationHolder rspec = metadata_Relocation::spec(klass_index);
- Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
-}
-
-void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
- assert (UseCompressedKlassPointers, "should only be used for compressed headers");
- assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
- int klass_index = oop_recorder()->find_index(k);
- RelocationHolder rspec = metadata_Relocation::spec(klass_index);
- Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
-}
-
-void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops || UseCompressedKlassPointers) {
- movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
- }
-}
-#endif // _LP64
-
-
-// C2 compiled method's prolog code.
-void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
-
- // WARNING: Initial instruction MUST be 5 bytes or longer so that
- // NativeJump::patch_verified_entry will be able to patch out the entry
- // code safely. The push to verify stack depth is ok at 5 bytes,
- // the frame allocation can be either 3 or 6 bytes. So if we don't do
- // stack bang then we must use the 6 byte frame allocation even if
- // we have no frame. :-(
-
- assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
- // Remove word for return addr
- framesize -= wordSize;
-
- // Calls to C2R adapters often do not accept exceptional returns.
- // We require that their callers must bang for them. But be careful, because
- // some VM calls (such as call site linkage) can use several kilobytes of
- // stack. But the stack safety zone should account for that.
- // See bugs 4446381, 4468289, 4497237.
- if (stack_bang) {
- generate_stack_overflow_check(framesize);
-
- // We always push rbp, so that on return to interpreter rbp, will be
- // restored correctly and we can correct the stack.
- push(rbp);
- // Remove word for ebp
- framesize -= wordSize;
-
- // Create frame
- if (framesize) {
- subptr(rsp, framesize);
- }
- } else {
- // Create frame (force generation of a 4 byte immediate value)
- subptr_imm32(rsp, framesize);
-
- // Save RBP register now.
- framesize -= wordSize;
- movptr(Address(rsp, framesize), rbp);
- }
-
- if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
- framesize -= wordSize;
- movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
- }
-
-#ifndef _LP64
- // If method sets FPU control word do it now
- if (fp_mode_24b) {
- fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
- }
- if (UseSSE >= 2 && VerifyFPU) {
- verify_FPU(0, "FPU stack must be clean on entry");
- }
-#endif
-
-#ifdef ASSERT
- if (VerifyStackAtCalls) {
- Label L;
- push(rax);
- mov(rax, rsp);
- andptr(rax, StackAlignmentInBytes-1);
- cmpptr(rax, StackAlignmentInBytes-wordSize);
- pop(rax);
- jcc(Assembler::equal, L);
- STOP("Stack is not properly aligned!");
- bind(L);
- }
-#endif
-
-}
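A worked example of the frame-size bookkeeping in the stack_bang branch, assuming 64-bit (wordSize == 8) and an aligned C2 frame size of 32 bytes:

#include <cassert>

int main() {
  const int wordSize = 8;
  int framesize = 32;        // as passed in, StackAlignmentInBytes-aligned
  framesize -= wordSize;     // 24: the call instruction already pushed the return address
  /* push(rbp) */
  framesize -= wordSize;     // 16: rbp now occupies one more slot
  assert(framesize == 16);   // subptr(rsp, 16) allocates the remainder of the frame
  return 0;
}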
-
-
-// IndexOf for constant substrings with size >= 8 chars
-// which don't need to be loaded through stack.
-void MacroAssembler::string_indexofC8(Register str1, Register str2,
- Register cnt1, Register cnt2,
- int int_cnt2, Register result,
- XMMRegister vec, Register tmp) {
- ShortBranchVerifier sbv(this);
- assert(UseSSE42Intrinsics, "SSE4.2 is required");
-
-  // This method uses the pcmpestri instruction with bound registers
- // inputs:
- // xmm - substring
- // rax - substring length (elements count)
- // mem - scanned string
- // rdx - string length (elements count)
- // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
- // outputs:
- // rcx - matched index in string
- assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
-
- Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
- RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
- MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
-
- // Note, inline_string_indexOf() generates checks:
- // if (substr.count > string.count) return -1;
- // if (substr.count == 0) return 0;
-  assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
-
- // Load substring.
- movdqu(vec, Address(str2, 0));
- movl(cnt2, int_cnt2);
- movptr(result, str1); // string addr
-
- if (int_cnt2 > 8) {
- jmpb(SCAN_TO_SUBSTR);
-
- // Reload substr for rescan, this code
- // is executed only for large substrings (> 8 chars)
- bind(RELOAD_SUBSTR);
- movdqu(vec, Address(str2, 0));
- negptr(cnt2); // Jumped here with negative cnt2, convert to positive
-
- bind(RELOAD_STR);
- // We came here after the beginning of the substring was
- // matched but the rest of it was not so we need to search
- // again. Start from the next element after the previous match.
-
-    // cnt2 is the number of remaining substring elements and
-    // cnt1 is the number of remaining string elements when the compare failed.
- // Restored cnt1 = cnt1 - cnt2 + int_cnt2
- subl(cnt1, cnt2);
- addl(cnt1, int_cnt2);
- movl(cnt2, int_cnt2); // Now restore cnt2
-
- decrementl(cnt1); // Shift to next element
- cmpl(cnt1, cnt2);
-    jccb(Assembler::negative, RET_NOT_FOUND);    // Left less than substring
-
- addptr(result, 2);
-
- } // (int_cnt2 > 8)
-
- // Scan string for start of substr in 16-byte vectors
- bind(SCAN_TO_SUBSTR);
- pcmpestri(vec, Address(result, 0), 0x0d);
- jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
- subl(cnt1, 8);
- jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
- cmpl(cnt1, cnt2);
-  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
- addptr(result, 16);
- jmpb(SCAN_TO_SUBSTR);
-
- // Found a potential substr
- bind(FOUND_CANDIDATE);
- // Matched whole vector if first element matched (tmp(rcx) == 0).
- if (int_cnt2 == 8) {
- jccb(Assembler::overflow, RET_FOUND); // OF == 1
- } else { // int_cnt2 > 8
- jccb(Assembler::overflow, FOUND_SUBSTR);
- }
- // After pcmpestri tmp(rcx) contains matched element index
- // Compute start addr of substr
- lea(result, Address(result, tmp, Address::times_2));
-
- // Make sure string is still long enough
- subl(cnt1, tmp);
- cmpl(cnt1, cnt2);
- if (int_cnt2 == 8) {
- jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
- } else { // int_cnt2 > 8
- jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
- }
-  // Left less than substring.
-
- bind(RET_NOT_FOUND);
- movl(result, -1);
- jmpb(EXIT);
-
- if (int_cnt2 > 8) {
-    // This code is optimized for the case when the whole substring
-    // is matched once its head is matched.
- bind(MATCH_SUBSTR_HEAD);
- pcmpestri(vec, Address(result, 0), 0x0d);
-    // Reload only the string if it does not match
- jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
-
- Label CONT_SCAN_SUBSTR;
- // Compare the rest of substring (> 8 chars).
- bind(FOUND_SUBSTR);
- // First 8 chars are already matched.
- negptr(cnt2);
- addptr(cnt2, 8);
-
- bind(SCAN_SUBSTR);
- subl(cnt1, 8);
- cmpl(cnt2, -8); // Do not read beyond substring
- jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
- // Back-up strings to avoid reading beyond substring:
- // cnt1 = cnt1 - cnt2 + 8
- addl(cnt1, cnt2); // cnt2 is negative
- addl(cnt1, 8);
- movl(cnt2, 8); negptr(cnt2);
- bind(CONT_SCAN_SUBSTR);
- if (int_cnt2 < (int)G) {
- movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
- pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
- } else {
- // calculate index in register to avoid integer overflow (int_cnt2*2)
- movl(tmp, int_cnt2);
- addptr(tmp, cnt2);
- movdqu(vec, Address(str2, tmp, Address::times_2, 0));
- pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
- }
-    // Need to reload string pointers if the whole vector did not match
- jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
- addptr(cnt2, 8);
- jcc(Assembler::negative, SCAN_SUBSTR);
- // Fall through if found full substring
-
- } // (int_cnt2 > 8)
-
- bind(RET_FOUND);
- // Found result if we matched full small substring.
- // Compute substr offset
- subptr(result, str1);
- shrl(result, 1); // index
- bind(EXIT);
-
-} // string_indexofC8
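Independent of the pcmpestri machinery, the value computed is the ordinary indexOf over UTF-16 chars, with the substr.count > string.count and substr.count == 0 cases already handled by the caller (see the note above). A scalar reference, useful when checking the register bookkeeping:

#include <cstdint>

static int index_of(const uint16_t* str, int str_len,
                    const uint16_t* substr, int sub_len) {
  for (int i = 0; i + sub_len <= str_len; i++) {
    int j = 0;
    while (j < sub_len && str[i + j] == substr[j]) j++;
    if (j == sub_len) return i;       // RET_FOUND: index in chars, not bytes
  }
  return -1;                          // RET_NOT_FOUND
}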
-
-// Small strings are loaded through stack if they cross page boundary.
-void MacroAssembler::string_indexof(Register str1, Register str2,
- Register cnt1, Register cnt2,
- int int_cnt2, Register result,
- XMMRegister vec, Register tmp) {
- ShortBranchVerifier sbv(this);
- assert(UseSSE42Intrinsics, "SSE4.2 is required");
- //
-  // int_cnt2 is the length of a small (< 8 chars) constant substring,
-  // or (-1) for a non-constant substring, in which case its length
-  // is in the cnt2 register.
- //
- // Note, inline_string_indexOf() generates checks:
- // if (substr.count > string.count) return -1;
- // if (substr.count == 0) return 0;
- //
- assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
-
-  // This method uses the pcmpestri instruction with bound registers
- // inputs:
- // xmm - substring
- // rax - substring length (elements count)
- // mem - scanned string
- // rdx - string length (elements count)
- // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
- // outputs:
- // rcx - matched index in string
- assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
-
- Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
- RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
- FOUND_CANDIDATE;
-
- { //========================================================
- // We don't know where these strings are located
- // and we can't read beyond them. Load them through stack.
- Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
-
- movptr(tmp, rsp); // save old SP
-
- if (int_cnt2 > 0) { // small (< 8 chars) constant substring
- if (int_cnt2 == 1) { // One char
- load_unsigned_short(result, Address(str2, 0));
- movdl(vec, result); // move 32 bits
- } else if (int_cnt2 == 2) { // Two chars
- movdl(vec, Address(str2, 0)); // move 32 bits
- } else if (int_cnt2 == 4) { // Four chars
- movq(vec, Address(str2, 0)); // move 64 bits
- } else { // cnt2 = { 3, 5, 6, 7 }
- // Array header size is 12 bytes in 32-bit VM
- // + 6 bytes for 3 chars == 18 bytes,
- // enough space to load vec and shift.
- assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
- movdqu(vec, Address(str2, (int_cnt2*2)-16));
- psrldq(vec, 16-(int_cnt2*2));
- }
- } else { // not constant substring
- cmpl(cnt2, 8);
- jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
-
-    // We can read beyond the string if str+16 does not cross a page boundary
- // since heaps are aligned and mapped by pages.
- assert(os::vm_page_size() < (int)G, "default page should be small");
- movl(result, str2); // We need only low 32 bits
- andl(result, (os::vm_page_size()-1));
- cmpl(result, (os::vm_page_size()-16));
- jccb(Assembler::belowEqual, CHECK_STR);
-
-      // Move small strings to the stack to allow loading 16 bytes into vec.
- subptr(rsp, 16);
- int stk_offset = wordSize-2;
- push(cnt2);
-
- bind(COPY_SUBSTR);
- load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
- movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
- decrement(cnt2);
- jccb(Assembler::notZero, COPY_SUBSTR);
-
- pop(cnt2);
- movptr(str2, rsp); // New substring address
- } // non constant
-
- bind(CHECK_STR);
- cmpl(cnt1, 8);
- jccb(Assembler::aboveEqual, BIG_STRINGS);
-
- // Check cross page boundary.
- movl(result, str1); // We need only low 32 bits
- andl(result, (os::vm_page_size()-1));
- cmpl(result, (os::vm_page_size()-16));
- jccb(Assembler::belowEqual, BIG_STRINGS);
-
- subptr(rsp, 16);
- int stk_offset = -2;
- if (int_cnt2 < 0) { // not constant
- push(cnt2);
- stk_offset += wordSize;
- }
- movl(cnt2, cnt1);
-
- bind(COPY_STR);
- load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
- movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
- decrement(cnt2);
- jccb(Assembler::notZero, COPY_STR);
-
- if (int_cnt2 < 0) { // not constant
- pop(cnt2);
- }
- movptr(str1, rsp); // New string address
-
- bind(BIG_STRINGS);
- // Load substring.
- if (int_cnt2 < 0) { // -1
- movdqu(vec, Address(str2, 0));
- push(cnt2); // substr count
- push(str2); // substr addr
- push(str1); // string addr
- } else {
- // Small (< 8 chars) constant substrings are loaded already.
- movl(cnt2, int_cnt2);
- }
- push(tmp); // original SP
-
- } // Finished loading
-
- //========================================================
- // Start search
- //
-
- movptr(result, str1); // string addr
-
- if (int_cnt2 < 0) { // Only for non constant substring
- jmpb(SCAN_TO_SUBSTR);
-
- // SP saved at sp+0
- // String saved at sp+1*wordSize
- // Substr saved at sp+2*wordSize
- // Substr count saved at sp+3*wordSize
-
- // Reload substr for rescan, this code
- // is executed only for large substrings (> 8 chars)
- bind(RELOAD_SUBSTR);
- movptr(str2, Address(rsp, 2*wordSize));
- movl(cnt2, Address(rsp, 3*wordSize));
- movdqu(vec, Address(str2, 0));
- // We came here after the beginning of the substring was
- // matched but the rest of it was not so we need to search
- // again. Start from the next element after the previous match.
- subptr(str1, result); // Restore counter
- shrl(str1, 1);
- addl(cnt1, str1);
- decrementl(cnt1); // Shift to next element
- cmpl(cnt1, cnt2);
-    jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
-
- addptr(result, 2);
- } // non constant
-
- // Scan string for start of substr in 16-byte vectors
- bind(SCAN_TO_SUBSTR);
- assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
- pcmpestri(vec, Address(result, 0), 0x0d);
- jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
- subl(cnt1, 8);
- jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
- cmpl(cnt1, cnt2);
-  jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
- addptr(result, 16);
-
- bind(ADJUST_STR);
- cmpl(cnt1, 8); // Do not read beyond string
- jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
- // Back-up string to avoid reading beyond string.
- lea(result, Address(result, cnt1, Address::times_2, -16));
- movl(cnt1, 8);
- jmpb(SCAN_TO_SUBSTR);
-
- // Found a potential substr
- bind(FOUND_CANDIDATE);
- // After pcmpestri tmp(rcx) contains matched element index
-
- // Make sure string is still long enough
- subl(cnt1, tmp);
- cmpl(cnt1, cnt2);
- jccb(Assembler::greaterEqual, FOUND_SUBSTR);
-  // Left less than substring.
-
- bind(RET_NOT_FOUND);
- movl(result, -1);
- jmpb(CLEANUP);
-
- bind(FOUND_SUBSTR);
- // Compute start addr of substr
- lea(result, Address(result, tmp, Address::times_2));
-
- if (int_cnt2 > 0) { // Constant substring
- // Repeat search for small substring (< 8 chars)
- // from new point without reloading substring.
- // Have to check that we don't read beyond string.
- cmpl(tmp, 8-int_cnt2);
- jccb(Assembler::greater, ADJUST_STR);
- // Fall through if matched whole substring.
- } else { // non constant
- assert(int_cnt2 == -1, "should be != 0");
-
- addl(tmp, cnt2);
- // Found result if we matched whole substring.
- cmpl(tmp, 8);
- jccb(Assembler::lessEqual, RET_FOUND);
-
- // Repeat search for small substring (<= 8 chars)
- // from new point 'str1' without reloading substring.
- cmpl(cnt2, 8);
- // Have to check that we don't read beyond string.
- jccb(Assembler::lessEqual, ADJUST_STR);
-
- Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
- // Compare the rest of substring (> 8 chars).
- movptr(str1, result);
-
- cmpl(tmp, cnt2);
- // First 8 chars are already matched.
- jccb(Assembler::equal, CHECK_NEXT);
-
- bind(SCAN_SUBSTR);
- pcmpestri(vec, Address(str1, 0), 0x0d);
-    // Need to reload string pointers if the whole vector did not match
- jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
-
- bind(CHECK_NEXT);
- subl(cnt2, 8);
- jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
- addptr(str1, 16);
- addptr(str2, 16);
- subl(cnt1, 8);
- cmpl(cnt2, 8); // Do not read beyond substring
- jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
- // Back-up strings to avoid reading beyond substring.
- lea(str2, Address(str2, cnt2, Address::times_2, -16));
- lea(str1, Address(str1, cnt2, Address::times_2, -16));
- subl(cnt1, cnt2);
- movl(cnt2, 8);
- addl(cnt1, 8);
- bind(CONT_SCAN_SUBSTR);
- movdqu(vec, Address(str2, 0));
- jmpb(SCAN_SUBSTR);
-
- bind(RET_FOUND_LONG);
- movptr(str1, Address(rsp, wordSize));
- } // non constant
-
- bind(RET_FOUND);
- // Compute substr offset
- subptr(result, str1);
- shrl(result, 1); // index
-
- bind(CLEANUP);
- pop(rsp); // restore SP
-
-} // string_indexof
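The subtle part of the setup block is the page-crossing test that decides whether a short string must first be copied to the stack: a 16-byte load starting at p stays within one page iff p's offset inside the page is at most page_size - 16. In isolation (4096 is an assumed page size; the real code uses os::vm_page_size()):

#include <cstdint>

static bool safe_to_load_16_bytes(uintptr_t p, uintptr_t page_size = 4096) {
  return (p & (page_size - 1)) <= page_size - 16;   // the andl/cmpl/jccb(belowEqual) test
}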
-
-// Compare strings.
-void MacroAssembler::string_compare(Register str1, Register str2,
- Register cnt1, Register cnt2, Register result,
- XMMRegister vec1) {
- ShortBranchVerifier sbv(this);
- Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
-
- // Compute the minimum of the string lengths and the
- // difference of the string lengths (stack).
- // Do the conditional move stuff
- movl(result, cnt1);
- subl(cnt1, cnt2);
- push(cnt1);
- cmov32(Assembler::lessEqual, cnt2, result);
-
- // Is the minimum length zero?
- testl(cnt2, cnt2);
- jcc(Assembler::zero, LENGTH_DIFF_LABEL);
-
- // Load first characters
- load_unsigned_short(result, Address(str1, 0));
- load_unsigned_short(cnt1, Address(str2, 0));
-
- // Compare first characters
- subl(result, cnt1);
- jcc(Assembler::notZero, POP_LABEL);
- decrementl(cnt2);
- jcc(Assembler::zero, LENGTH_DIFF_LABEL);
-
- {
- // Check after comparing first character to see if strings are equivalent
- Label LSkip2;
- // Check if the strings start at same location
- cmpptr(str1, str2);
- jccb(Assembler::notEqual, LSkip2);
-
- // Check if the length difference is zero (from stack)
- cmpl(Address(rsp, 0), 0x0);
- jcc(Assembler::equal, LENGTH_DIFF_LABEL);
-
- // Strings might not be equivalent
- bind(LSkip2);
- }
-
- Address::ScaleFactor scale = Address::times_2;
- int stride = 8;
-
- // Advance to next element
- addptr(str1, 16/stride);
- addptr(str2, 16/stride);
-
- if (UseSSE42Intrinsics) {
- Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
- int pcmpmask = 0x19;
- // Setup to compare 16-byte vectors
- movl(result, cnt2);
- andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
- jccb(Assembler::zero, COMPARE_TAIL);
-
- lea(str1, Address(str1, result, scale));
- lea(str2, Address(str2, result, scale));
- negptr(result);
-
- // pcmpestri
- // inputs:
- // vec1- substring
- // rax - negative string length (elements count)
-    //   mem - scanned string
- // rdx - string length (elements count)
- // pcmpmask - cmp mode: 11000 (string compare with negated result)
- // + 00 (unsigned bytes) or + 01 (unsigned shorts)
- // outputs:
- // rcx - first mismatched element index
- assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
-
- bind(COMPARE_WIDE_VECTORS);
- movdqu(vec1, Address(str1, result, scale));
- pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
- // After pcmpestri cnt1(rcx) contains mismatched element index
-
- jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
- addptr(result, stride);
- subptr(cnt2, stride);
- jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
-
- // compare wide vectors tail
- testl(result, result);
- jccb(Assembler::zero, LENGTH_DIFF_LABEL);
-
- movl(cnt2, stride);
- movl(result, stride);
- negptr(result);
- movdqu(vec1, Address(str1, result, scale));
- pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
- jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
-
- // Mismatched characters in the vectors
- bind(VECTOR_NOT_EQUAL);
- addptr(result, cnt1);
- movptr(cnt2, result);
- load_unsigned_short(result, Address(str1, cnt2, scale));
- load_unsigned_short(cnt1, Address(str2, cnt2, scale));
- subl(result, cnt1);
- jmpb(POP_LABEL);
-
- bind(COMPARE_TAIL); // limit is zero
- movl(cnt2, result);
- // Fallthru to tail compare
- }
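For reference (illustrative note, not part of this changeset): the pcmpestri control byte used in the block above decodes as follows, per the Intel SDM immediate encoding.

// pcmpmask = 0x19 = 0b0011001
//   bits 1:0 = 01 -> compare unsigned 16-bit words (jchars)
//   bits 3:2 = 10 -> "equal each" aggregation, i.e. element-wise comparison
//   bits 5:4 = 01 -> negative polarity: equal elements are reported as mismatches
//   bit  6   = 0  -> ECX receives the least significant (first) mismatch index
// (string_indexof uses 0x0d instead: unsigned words + "equal ordered",
//  i.e. substring search, with positive polarity.)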
-
- // Shift str2 and str1 to the end of the arrays, negate min
- lea(str1, Address(str1, cnt2, scale, 0));
- lea(str2, Address(str2, cnt2, scale, 0));
- negptr(cnt2);
-
- // Compare the rest of the elements
- bind(WHILE_HEAD_LABEL);
- load_unsigned_short(result, Address(str1, cnt2, scale, 0));
- load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
- subl(result, cnt1);
- jccb(Assembler::notZero, POP_LABEL);
- increment(cnt2);
- jccb(Assembler::notZero, WHILE_HEAD_LABEL);
-
- // Strings are equal up to min length. Return the length difference.
- bind(LENGTH_DIFF_LABEL);
- pop(result);
- jmpb(DONE_LABEL);
-
- // Discard the stored length difference
- bind(POP_LABEL);
- pop(cnt1);
-
- // That's it
- bind(DONE_LABEL);
-}
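For reference (illustrative C++ sketch, not HotSpot code and not part of this changeset): the result convention the removed string_compare stub implements, with uint16_t standing in for the 16-bit jchar type.

// Returns the difference of the first mismatching characters, or the
// length difference if one string is a prefix of the other.
static int string_compare_ref(const uint16_t* str1, int cnt1,
                              const uint16_t* str2, int cnt2) {
  int min_len = (cnt1 < cnt2) ? cnt1 : cnt2;
  for (int i = 0; i < min_len; i++) {
    int diff = (int)str1[i] - (int)str2[i];
    if (diff != 0) return diff;     // first mismatch decides
  }
  return cnt1 - cnt2;               // equal prefix: length difference decides
}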
-
-// Compare char[] arrays aligned to 4 bytes or substrings.
-void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
- Register limit, Register result, Register chr,
- XMMRegister vec1, XMMRegister vec2) {
- ShortBranchVerifier sbv(this);
- Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
-
- int length_offset = arrayOopDesc::length_offset_in_bytes();
- int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
-
- // Check the input args
- cmpptr(ary1, ary2);
- jcc(Assembler::equal, TRUE_LABEL);
-
- if (is_array_equ) {
- // Need additional checks for arrays_equals.
- testptr(ary1, ary1);
- jcc(Assembler::zero, FALSE_LABEL);
- testptr(ary2, ary2);
- jcc(Assembler::zero, FALSE_LABEL);
-
- // Check the lengths
- movl(limit, Address(ary1, length_offset));
- cmpl(limit, Address(ary2, length_offset));
- jcc(Assembler::notEqual, FALSE_LABEL);
- }
-
- // count == 0
- testl(limit, limit);
- jcc(Assembler::zero, TRUE_LABEL);
-
- if (is_array_equ) {
- // Load array address
- lea(ary1, Address(ary1, base_offset));
- lea(ary2, Address(ary2, base_offset));
- }
-
- shll(limit, 1); // byte count != 0
- movl(result, limit); // copy
-
- if (UseSSE42Intrinsics) {
- // With SSE4.2, use double quad vector compare
- Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
-
- // Compare 16-byte vectors
- andl(result, 0x0000000e); // tail count (in bytes)
- andl(limit, 0xfffffff0); // vector count (in bytes)
- jccb(Assembler::zero, COMPARE_TAIL);
-
- lea(ary1, Address(ary1, limit, Address::times_1));
- lea(ary2, Address(ary2, limit, Address::times_1));
- negptr(limit);
-
- bind(COMPARE_WIDE_VECTORS);
- movdqu(vec1, Address(ary1, limit, Address::times_1));
- movdqu(vec2, Address(ary2, limit, Address::times_1));
- pxor(vec1, vec2);
-
- ptest(vec1, vec1);
- jccb(Assembler::notZero, FALSE_LABEL);
- addptr(limit, 16);
- jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
-
- testl(result, result);
- jccb(Assembler::zero, TRUE_LABEL);
-
- movdqu(vec1, Address(ary1, result, Address::times_1, -16));
- movdqu(vec2, Address(ary2, result, Address::times_1, -16));
- pxor(vec1, vec2);
-
- ptest(vec1, vec1);
- jccb(Assembler::notZero, FALSE_LABEL);
- jmpb(TRUE_LABEL);
-
- bind(COMPARE_TAIL); // limit is zero
- movl(limit, result);
- // Fallthru to tail compare
- }
-
- // Compare 4-byte vectors
- andl(limit, 0xfffffffc); // vector count (in bytes)
- jccb(Assembler::zero, COMPARE_CHAR);
-
- lea(ary1, Address(ary1, limit, Address::times_1));
- lea(ary2, Address(ary2, limit, Address::times_1));
- negptr(limit);
-
- bind(COMPARE_VECTORS);
- movl(chr, Address(ary1, limit, Address::times_1));
- cmpl(chr, Address(ary2, limit, Address::times_1));
- jccb(Assembler::notEqual, FALSE_LABEL);
- addptr(limit, 4);
- jcc(Assembler::notZero, COMPARE_VECTORS);
-
- // Compare trailing char (final 2 bytes), if any
- bind(COMPARE_CHAR);
- testl(result, 0x2); // tail char
- jccb(Assembler::zero, TRUE_LABEL);
- load_unsigned_short(chr, Address(ary1, 0));
- load_unsigned_short(limit, Address(ary2, 0));
- cmpl(chr, limit);
- jccb(Assembler::notEqual, FALSE_LABEL);
-
- bind(TRUE_LABEL);
- movl(result, 1); // return true
- jmpb(DONE);
-
- bind(FALSE_LABEL);
- xorl(result, result); // return false
-
- // That's it
- bind(DONE);
-}
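For reference (illustrative C++ sketch, not part of this changeset): the scalar contract the removed char_arrays_equals stub implements for the is_array_equ case; plain pointers and lengths stand in for the array oops and their length/base offsets.

static bool char_arrays_equal_ref(const uint16_t* ary1, int len1,
                                  const uint16_t* ary2, int len2) {
  if (ary1 == ary2)                 return true;   // same array (or both NULL)
  if (ary1 == NULL || ary2 == NULL) return false;  // null check (arrays_equals only)
  if (len1 != len2)                 return false;  // length check (arrays_equals only)
  for (int i = 0; i < len1; i++) {
    if (ary1[i] != ary2[i]) return false;          // element-wise compare
  }
  return true;                                     // all characters matched
}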
-
-void MacroAssembler::generate_fill(BasicType t, bool aligned,
- Register to, Register value, Register count,
- Register rtmp, XMMRegister xtmp) {
- ShortBranchVerifier sbv(this);
- assert_different_registers(to, value, count, rtmp);
- Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
- Label L_fill_2_bytes, L_fill_4_bytes;
-
- int shift = -1;
- switch (t) {
- case T_BYTE:
- shift = 2;
- break;
- case T_SHORT:
- shift = 1;
- break;
- case T_INT:
- shift = 0;
- break;
- default: ShouldNotReachHere();
- }
-
- if (t == T_BYTE) {
- andl(value, 0xff);
- movl(rtmp, value);
- shll(rtmp, 8);
- orl(value, rtmp);
- }
- if (t == T_SHORT) {
- andl(value, 0xffff);
- }
- if (t == T_BYTE || t == T_SHORT) {
- movl(rtmp, value);
- shll(rtmp, 16);
- orl(value, rtmp);
- }
-
- cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
- jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
- if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
- // align source address at 4 bytes address boundary
- if (t == T_BYTE) {
- // One byte misalignment happens only for byte arrays
- testptr(to, 1);
- jccb(Assembler::zero, L_skip_align1);
- movb(Address(to, 0), value);
- increment(to);
- decrement(count);
- BIND(L_skip_align1);
- }
- // Two bytes misalignment happens only for byte and short (char) arrays
- testptr(to, 2);
- jccb(Assembler::zero, L_skip_align2);
- movw(Address(to, 0), value);
- addptr(to, 2);
- subl(count, 1<<(shift-1));
- BIND(L_skip_align2);
- }
- if (UseSSE < 2) {
- Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
- // Fill 32-byte chunks
- subl(count, 8 << shift);
- jcc(Assembler::less, L_check_fill_8_bytes);
- align(16);
-
- BIND(L_fill_32_bytes_loop);
-
- for (int i = 0; i < 32; i += 4) {
- movl(Address(to, i), value);
- }
-
- addptr(to, 32);
- subl(count, 8 << shift);
- jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
- BIND(L_check_fill_8_bytes);
- addl(count, 8 << shift);
- jccb(Assembler::zero, L_exit);
- jmpb(L_fill_8_bytes);
-
- //
- // length is too short, just fill qwords
- //
- BIND(L_fill_8_bytes_loop);
- movl(Address(to, 0), value);
- movl(Address(to, 4), value);
- addptr(to, 8);
- BIND(L_fill_8_bytes);
- subl(count, 1 << (shift + 1));
- jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
- // fall through to fill 4 bytes
- } else {
- Label L_fill_32_bytes;
- if (!UseUnalignedLoadStores) {
- // align to 8 bytes, we know we are 4 byte aligned to start
- testptr(to, 4);
- jccb(Assembler::zero, L_fill_32_bytes);
- movl(Address(to, 0), value);
- addptr(to, 4);
- subl(count, 1<<shift);
- }
- BIND(L_fill_32_bytes);
- {
- assert( UseSSE >= 2, "supported cpu only" );
- Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
- // Fill 32-byte chunks
- movdl(xtmp, value);
- pshufd(xtmp, xtmp, 0);
-
- subl(count, 8 << shift);
- jcc(Assembler::less, L_check_fill_8_bytes);
- align(16);
-
- BIND(L_fill_32_bytes_loop);
-
- if (UseUnalignedLoadStores) {
- movdqu(Address(to, 0), xtmp);
- movdqu(Address(to, 16), xtmp);
- } else {
- movq(Address(to, 0), xtmp);
- movq(Address(to, 8), xtmp);
- movq(Address(to, 16), xtmp);
- movq(Address(to, 24), xtmp);
- }
-
- addptr(to, 32);
- subl(count, 8 << shift);
- jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
- BIND(L_check_fill_8_bytes);
- addl(count, 8 << shift);
- jccb(Assembler::zero, L_exit);
- jmpb(L_fill_8_bytes);
-
- //
- // length is too short, just fill qwords
- //
- BIND(L_fill_8_bytes_loop);
- movq(Address(to, 0), xtmp);
- addptr(to, 8);
- BIND(L_fill_8_bytes);
- subl(count, 1 << (shift + 1));
- jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
- }
- }
- // fill trailing 4 bytes
- BIND(L_fill_4_bytes);
- testl(count, 1<<shift);
- jccb(Assembler::zero, L_fill_2_bytes);
- movl(Address(to, 0), value);
- if (t == T_BYTE || t == T_SHORT) {
- addptr(to, 4);
- BIND(L_fill_2_bytes);
- // fill trailing 2 bytes
- testl(count, 1<<(shift-1));
- jccb(Assembler::zero, L_fill_byte);
- movw(Address(to, 0), value);
- if (t == T_BYTE) {
- addptr(to, 2);
- BIND(L_fill_byte);
- // fill trailing byte
- testl(count, 1);
- jccb(Assembler::zero, L_exit);
- movb(Address(to, 0), value);
- } else {
- BIND(L_fill_byte);
- }
- } else {
- BIND(L_fill_2_bytes);
- }
- BIND(L_exit);
-}
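For reference (standalone C++ sketch, not the emitted assembly and not part of this changeset): before the wide stores, generate_fill widens the fill value so that one 32-bit or larger store writes several elements.

#include <stdint.h>

static uint32_t broadcast_fill_value(int elem_size_in_bytes, uint32_t value) {
  if (elem_size_in_bytes == 1) {      // T_BYTE
    value &= 0xff;
    value |= value << 8;              // replicate the byte into the low 16 bits
  }
  if (elem_size_in_bytes <= 2) {      // T_BYTE or T_SHORT
    value &= 0xffff;
    value |= value << 16;             // replicate the 16-bit pattern into 32 bits
  }
  return value;                       // T_INT values are stored as-is
}
// The SSE path then broadcasts this 32-bit pattern across an XMM register
// with movdl + pshufd before the 32-byte store loop.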
-#undef BIND
-#undef BLOCK_COMMENT
-
-
-Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
- switch (cond) {
- // Note some conditions are synonyms for others
- case Assembler::zero: return Assembler::notZero;
- case Assembler::notZero: return Assembler::zero;
- case Assembler::less: return Assembler::greaterEqual;
- case Assembler::lessEqual: return Assembler::greater;
- case Assembler::greater: return Assembler::lessEqual;
- case Assembler::greaterEqual: return Assembler::less;
- case Assembler::below: return Assembler::aboveEqual;
- case Assembler::belowEqual: return Assembler::above;
- case Assembler::above: return Assembler::belowEqual;
- case Assembler::aboveEqual: return Assembler::below;
- case Assembler::overflow: return Assembler::noOverflow;
- case Assembler::noOverflow: return Assembler::overflow;
- case Assembler::negative: return Assembler::positive;
- case Assembler::positive: return Assembler::negative;
- case Assembler::parity: return Assembler::noParity;
- case Assembler::noParity: return Assembler::parity;
- }
- ShouldNotReachHere(); return Assembler::overflow;
-}
-
-SkipIfEqual::SkipIfEqual(
- MacroAssembler* masm, const bool* flag_addr, bool value) {
- _masm = masm;
- _masm->cmp8(ExternalAddress((address)flag_addr), value);
- _masm->jcc(Assembler::equal, _label);
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
-}
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,8 @@
#ifndef CPU_X86_VM_ASSEMBLER_X86_HPP
#define CPU_X86_VM_ASSEMBLER_X86_HPP
+#include "asm/register.hpp"
+
class BiasedLockingCounters;
// Contains all the definitions needed for x86 assembly code generation.
@@ -706,8 +708,6 @@
void check_relocation(RelocationHolder const& rspec, int format);
#endif
- inline void emit_long64(jlong x);
-
void emit_data(jint data, relocInfo::relocType rtype, int format);
void emit_data(jint data, RelocationHolder const& rspec, int format);
void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
@@ -916,7 +916,7 @@
void cdqq();
- void cld() { emit_byte(0xfc); }
+ void cld();
void clflush(Address adr);
@@ -963,10 +963,7 @@
void comiss(XMMRegister dst, XMMRegister src);
// Identify processor type and features
- void cpuid() {
- emit_byte(0x0F);
- emit_byte(0xA2);
- }
+ void cpuid();
// Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -1211,11 +1208,7 @@
void leaq(Register dst, Address src);
- void lfence() {
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_byte(0xE8);
- }
+ void lfence();
void lock();
@@ -1523,7 +1516,7 @@
void sqrtss(XMMRegister dst, Address src);
void sqrtss(XMMRegister dst, XMMRegister src);
- void std() { emit_byte(0xfd); }
+ void std();
void stmxcsr( Address dst );
@@ -1580,11 +1573,7 @@
void xchgq(Register dst, Register src);
// Get Value of Extended Control Register
- void xgetbv() {
- emit_byte(0x0F);
- emit_byte(0x01);
- emit_byte(0xD0);
- }
+ void xgetbv();
void xorl(Register dst, int32_t imm32);
void xorl(Register dst, Address src);
@@ -1781,1114 +1770,4 @@
};
-
-// MacroAssembler extends Assembler by frequently used macros.
-//
-// Instructions for which a 'better' code sequence exists depending
-// on arguments should also go in here.
-
-class MacroAssembler: public Assembler {
- friend class LIR_Assembler;
- friend class Runtime1; // as_Address()
-
- protected:
-
- Address as_Address(AddressLiteral adr);
- Address as_Address(ArrayAddress adr);
-
- // Support for VM calls
- //
- // This is the base routine called by the different versions of call_VM_leaf. The interpreter
- // may customize this version by overriding it for its purposes (e.g., to save/restore
- // additional registers when doing a VM call).
-#ifdef CC_INTERP
- // c++ interpreter never wants to use interp_masm version of call_VM
- #define VIRTUAL
-#else
- #define VIRTUAL virtual
-#endif
-
- VIRTUAL void call_VM_leaf_base(
- address entry_point, // the entry point
- int number_of_arguments // the number of arguments to pop after the call
- );
-
- // This is the base routine called by the different versions of call_VM. The interpreter
- // may customize this version by overriding it for its purposes (e.g., to save/restore
- // additional registers when doing a VM call).
- //
-  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
-  // returns the register which contains the thread upon return. If a thread register has been
-  // specified, the return value will correspond to that register. If no last_java_sp is specified
-  // (noreg) then rsp will be used instead.
- VIRTUAL void call_VM_base( // returns the register containing the thread upon return
- Register oop_result, // where an oop-result ends up if any; use noreg otherwise
- Register java_thread, // the thread if computed before ; use noreg otherwise
- Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
- address entry_point, // the entry point
- int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
- bool check_exceptions // whether to check for pending exceptions after return
- );
-
- // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
- // The implementation is only non-empty for the InterpreterMacroAssembler,
- // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
- void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
-
- // helpers for FPU flag access
- // tmp is a temporary register, if none is available use noreg
- void save_rax (Register tmp);
- void restore_rax(Register tmp);
-
- public:
- MacroAssembler(CodeBuffer* code) : Assembler(code) {}
-
- // Support for NULL-checks
- //
- // Generates code that causes a NULL OS exception if the content of reg is NULL.
- // If the accessed location is M[reg + offset] and the offset is known, provide the
- // offset. No explicit code generation is needed if the offset is within a certain
- // range (0 <= offset <= page_size).
-
- void null_check(Register reg, int offset = -1);
- static bool needs_explicit_null_check(intptr_t offset);
-
- // Required platform-specific helpers for Label::patch_instructions.
- // They _shadow_ the declarations in AbstractAssembler, which are undefined.
- void pd_patch_instruction(address branch, address target);
-#ifndef PRODUCT
- static void pd_print_patched_instruction(address branch);
-#endif
-
- // The following 4 methods return the offset of the appropriate move instruction
-
- // Support for fast byte/short loading with zero extension (depending on particular CPU)
- int load_unsigned_byte(Register dst, Address src);
- int load_unsigned_short(Register dst, Address src);
-
- // Support for fast byte/short loading with sign extension (depending on particular CPU)
- int load_signed_byte(Register dst, Address src);
- int load_signed_short(Register dst, Address src);
-
- // Support for sign-extension (hi:lo = extend_sign(lo))
- void extend_sign(Register hi, Register lo);
-
- // Load and store values by size and signed-ness
- void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
- void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
-
- // Support for inc/dec with optimal instruction selection depending on value
-
- void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
- void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
-
- void decrementl(Address dst, int value = 1);
- void decrementl(Register reg, int value = 1);
-
- void decrementq(Register reg, int value = 1);
- void decrementq(Address dst, int value = 1);
-
- void incrementl(Address dst, int value = 1);
- void incrementl(Register reg, int value = 1);
-
- void incrementq(Register reg, int value = 1);
- void incrementq(Address dst, int value = 1);
-
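A note for readers of these declarations (illustrative, not part of this changeset): the LP64_ONLY / NOT_LP64 idiom used throughout the *ptr and increment/decrement helpers selects the pointer-width instruction at compile time. A simplified sketch of the macros, which HotSpot defines elsewhere (utilities/globalDefinitions.hpp):

#ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif
// e.g. increment(reg) therefore expands to incrementq(reg, 1) on 64-bit
// builds and to incrementl(reg, 1) on 32-bit builds.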
-
- // Support optimal SSE move instructions.
- void movflt(XMMRegister dst, XMMRegister src) {
- if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
- else { movss (dst, src); return; }
- }
- void movflt(XMMRegister dst, Address src) { movss(dst, src); }
- void movflt(XMMRegister dst, AddressLiteral src);
- void movflt(Address dst, XMMRegister src) { movss(dst, src); }
-
- void movdbl(XMMRegister dst, XMMRegister src) {
- if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
- else { movsd (dst, src); return; }
- }
-
- void movdbl(XMMRegister dst, AddressLiteral src);
-
- void movdbl(XMMRegister dst, Address src) {
- if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
- else { movlpd(dst, src); return; }
- }
- void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
-
- void incrementl(AddressLiteral dst);
- void incrementl(ArrayAddress dst);
-
- // Alignment
- void align(int modulus);
-
- // A 5 byte nop that is safe for patching (see patch_verified_entry)
- void fat_nop();
-
- // Stack frame creation/removal
- void enter();
- void leave();
-
- // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
- // The pointer will be loaded into the thread register.
- void get_thread(Register thread);
-
-
- // Support for VM calls
- //
- // It is imperative that all calls into the VM are handled via the call_VM macros.
- // They make sure that the stack linkage is setup correctly. call_VM's correspond
- // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
-
-
- void call_VM(Register oop_result,
- address entry_point,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- address entry_point,
- Register arg_1,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- address entry_point,
- Register arg_1, Register arg_2,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- address entry_point,
- Register arg_1, Register arg_2, Register arg_3,
- bool check_exceptions = true);
-
- // Overloadings with last_Java_sp
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments = 0,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1, bool
- check_exceptions = true);
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1, Register arg_2,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1, Register arg_2, Register arg_3,
- bool check_exceptions = true);
-
- void get_vm_result (Register oop_result, Register thread);
- void get_vm_result_2(Register metadata_result, Register thread);
-
- // These always tightly bind to MacroAssembler::call_VM_base
- // bypassing the virtual implementation
- void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
- void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
- void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
- void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
- void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
-
- void call_VM_leaf(address entry_point,
- int number_of_arguments = 0);
- void call_VM_leaf(address entry_point,
- Register arg_1);
- void call_VM_leaf(address entry_point,
- Register arg_1, Register arg_2);
- void call_VM_leaf(address entry_point,
- Register arg_1, Register arg_2, Register arg_3);
-
- // These always tightly bind to MacroAssembler::call_VM_leaf_base
- // bypassing the virtual implementation
- void super_call_VM_leaf(address entry_point);
- void super_call_VM_leaf(address entry_point, Register arg_1);
- void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
- void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
- void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
-
- // last Java Frame (fills frame anchor)
- void set_last_Java_frame(Register thread,
- Register last_java_sp,
- Register last_java_fp,
- address last_java_pc);
-
- // thread in the default location (r15_thread on 64bit)
- void set_last_Java_frame(Register last_java_sp,
- Register last_java_fp,
- address last_java_pc);
-
- void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);
-
- // thread in the default location (r15_thread on 64bit)
- void reset_last_Java_frame(bool clear_fp, bool clear_pc);
-
- // Stores
- void store_check(Register obj); // store check for obj - register is destroyed afterwards
- void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
-
-#ifndef SERIALGC
-
- void g1_write_barrier_pre(Register obj,
- Register pre_val,
- Register thread,
- Register tmp,
- bool tosca_live,
- bool expand_call);
-
- void g1_write_barrier_post(Register store_addr,
- Register new_val,
- Register thread,
- Register tmp,
- Register tmp2);
-
-#endif // SERIALGC
-
- // split store_check(Register obj) to enhance instruction interleaving
- void store_check_part_1(Register obj);
- void store_check_part_2(Register obj);
-
- // C 'boolean' to Java boolean: x == 0 ? 0 : 1
- void c2bool(Register x);
-
- // C++ bool manipulation
-
- void movbool(Register dst, Address src);
- void movbool(Address dst, bool boolconst);
- void movbool(Address dst, Register src);
- void testbool(Register dst);
-
- // oop manipulations
- void load_klass(Register dst, Register src);
- void store_klass(Register dst, Register src);
-
- void load_heap_oop(Register dst, Address src);
- void load_heap_oop_not_null(Register dst, Address src);
- void store_heap_oop(Address dst, Register src);
- void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);
-
- // Used for storing NULL. All other oop constants should be
- // stored using routines that take a jobject.
- void store_heap_oop_null(Address dst);
-
- void load_prototype_header(Register dst, Register src);
-
-#ifdef _LP64
- void store_klass_gap(Register dst, Register src);
-
- // This dummy is to prevent a call to store_heap_oop from
- // converting a zero (like NULL) into a Register by giving
- // the compiler two choices it can't resolve
-
- void store_heap_oop(Address dst, void* dummy);
-
- void encode_heap_oop(Register r);
- void decode_heap_oop(Register r);
- void encode_heap_oop_not_null(Register r);
- void decode_heap_oop_not_null(Register r);
- void encode_heap_oop_not_null(Register dst, Register src);
- void decode_heap_oop_not_null(Register dst, Register src);
-
- void set_narrow_oop(Register dst, jobject obj);
- void set_narrow_oop(Address dst, jobject obj);
- void cmp_narrow_oop(Register dst, jobject obj);
- void cmp_narrow_oop(Address dst, jobject obj);
-
- void encode_klass_not_null(Register r);
- void decode_klass_not_null(Register r);
- void encode_klass_not_null(Register dst, Register src);
- void decode_klass_not_null(Register dst, Register src);
- void set_narrow_klass(Register dst, Klass* k);
- void set_narrow_klass(Address dst, Klass* k);
- void cmp_narrow_klass(Register dst, Klass* k);
- void cmp_narrow_klass(Address dst, Klass* k);
-
- // if heap base register is used - reinit it with the correct value
- void reinit_heapbase();
-
- DEBUG_ONLY(void verify_heapbase(const char* msg);)
-
-#endif // _LP64
-
- // Int division/remainder for Java
- // (as idivl, but checks for special case as described in JVM spec.)
- // returns idivl instruction offset for implicit exception handling
- int corrected_idivl(Register reg);
-
- // Long division/remainder for Java
- // (as idivq, but checks for special case as described in JVM spec.)
- // returns idivq instruction offset for implicit exception handling
- int corrected_idivq(Register reg);
-
- void int3();
-
- // Long operation macros for a 32bit cpu
- // Long negation for Java
- void lneg(Register hi, Register lo);
-
- // Long multiplication for Java
- // (destroys contents of eax, ebx, ecx and edx)
- void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
-
- // Long shifts for Java
- // (semantics as described in JVM spec.)
- void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
- void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
-
- // Long compare for Java
- // (semantics as described in JVM spec.)
- void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
-
-
- // misc
-
- // Sign extension
- void sign_extend_short(Register reg);
- void sign_extend_byte(Register reg);
-
- // Division by power of 2, rounding towards 0
- void division_with_shift(Register reg, int shift_value);
-
- // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
- //
- // CF (corresponds to C0) if x < y
- // PF (corresponds to C2) if unordered
- // ZF (corresponds to C3) if x = y
- //
- // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
- // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
- void fcmp(Register tmp);
- // Variant of the above which allows y to be further down the stack
- // and which only pops x and y if specified. If pop_right is
- // specified then pop_left must also be specified.
- void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
-
- // Floating-point comparison for Java
- // Compares the top-most stack entries on the FPU stack and stores the result in dst.
- // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
- // (semantics as described in JVM spec.)
- void fcmp2int(Register dst, bool unordered_is_less);
- // Variant of the above which allows y to be further down the stack
- // and which only pops x and y if specified. If pop_right is
- // specified then pop_left must also be specified.
- void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
-
- // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
- // tmp is a temporary register, if none is available use noreg
- void fremr(Register tmp);
-
-
- // same as fcmp2int, but using SSE2
- void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
- void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
-
- // Inlined sin/cos generator for Java; must not use CPU instruction
- // directly on Intel as it does not have high enough precision
-  // outside of the range [-pi/4, pi/4]. Extra argument indicates the
- // number of FPU stack slots in use; all but the topmost will
- // require saving if a slow case is necessary. Assumes argument is
- // on FP TOS; result is on FP TOS. No cpu registers are changed by
- // this code.
- void trigfunc(char trig, int num_fpu_regs_in_use = 1);
-
- // branch to L if FPU flag C2 is set/not set
- // tmp is a temporary register, if none is available use noreg
- void jC2 (Register tmp, Label& L);
- void jnC2(Register tmp, Label& L);
-
- // Pop ST (ffree & fincstp combined)
- void fpop();
-
- // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
- void push_fTOS();
-
- // pops double TOS element from CPU stack and pushes on FPU stack
- void pop_fTOS();
-
- void empty_FPU_stack();
-
- void push_IU_state();
- void pop_IU_state();
-
- void push_FPU_state();
- void pop_FPU_state();
-
- void push_CPU_state();
- void pop_CPU_state();
-
- // Round up to a power of two
- void round_to(Register reg, int modulus);
-
- // Callee saved registers handling
- void push_callee_saved_registers();
- void pop_callee_saved_registers();
-
- // allocation
- void eden_allocate(
- Register obj, // result: pointer to object after successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at compile time
- Register t1, // temp register
- Label& slow_case // continuation point if fast allocation fails
- );
- void tlab_allocate(
- Register obj, // result: pointer to object after successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at compile time
- Register t1, // temp register
- Register t2, // temp register
- Label& slow_case // continuation point if fast allocation fails
- );
- Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
- void incr_allocated_bytes(Register thread,
- Register var_size_in_bytes, int con_size_in_bytes,
- Register t1 = noreg);
-
- // interface method calling
- void lookup_interface_method(Register recv_klass,
- Register intf_klass,
- RegisterOrConstant itable_index,
- Register method_result,
- Register scan_temp,
- Label& no_such_interface);
-
- // virtual method calling
- void lookup_virtual_method(Register recv_klass,
- RegisterOrConstant vtable_index,
- Register method_result);
-
- // Test sub_klass against super_klass, with fast and slow paths.
-
- // The fast path produces a tri-state answer: yes / no / maybe-slow.
- // One of the three labels can be NULL, meaning take the fall-through.
- // If super_check_offset is -1, the value is loaded up from super_klass.
- // No registers are killed, except temp_reg.
- void check_klass_subtype_fast_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Label* L_success,
- Label* L_failure,
- Label* L_slow_path,
- RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
-
- // The rest of the type check; must be wired to a corresponding fast path.
- // It does not repeat the fast path logic, so don't use it standalone.
- // The temp_reg and temp2_reg can be noreg, if no temps are available.
- // Updates the sub's secondary super cache as necessary.
- // If set_cond_codes, condition codes will be Z on success, NZ on failure.
- void check_klass_subtype_slow_path(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Register temp2_reg,
- Label* L_success,
- Label* L_failure,
- bool set_cond_codes = false);
-
- // Simplified, combined version, good for typical uses.
- // Falls through on failure.
- void check_klass_subtype(Register sub_klass,
- Register super_klass,
- Register temp_reg,
- Label& L_success);
-
- // method handles (JSR 292)
- Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
-
- //----
- void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
-
- // Debugging
-
- // only if +VerifyOops
- // TODO: Make these macros with file and line like sparc version!
- void verify_oop(Register reg, const char* s = "broken oop");
- void verify_oop_addr(Address addr, const char * s = "broken oop addr");
-
- // TODO: verify method and klass metadata (compare against vptr?)
- void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
- void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
-
-#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
-#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
-
- // only if +VerifyFPU
- void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
-
- // prints msg, dumps registers and stops execution
- void stop(const char* msg);
-
- // prints msg and continues
- void warn(const char* msg);
-
- // dumps registers and other state
- void print_state();
-
- static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
- static void debug64(char* msg, int64_t pc, int64_t regs[]);
- static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
- static void print_state64(int64_t pc, int64_t regs[]);
-
- void os_breakpoint();
-
- void untested() { stop("untested"); }
-
- void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
-
- void should_not_reach_here() { stop("should not reach here"); }
-
- void print_CPU_state();
-
- // Stack overflow checking
- void bang_stack_with_offset(int offset) {
- // stack grows down, caller passes positive offset
- assert(offset > 0, "must bang with negative offset");
- movl(Address(rsp, (-offset)), rax);
- }
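A hypothetical call-site sketch for the stack-banging helper above (the masm and shadow_pages names and the loop bounds are illustrative, not taken from this changeset):

// Touch each page of the shadow area below rsp so a stack overflow
// faults eagerly on the guard page rather than deep inside a callee.
for (int pages = 1; pages <= shadow_pages; pages++) {
  masm->bang_stack_with_offset(pages * os::vm_page_size());
}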
-
- // Writes to stack successive pages until offset reached to check for
- // stack overflow + shadow pages. Also, clobbers tmp
- void bang_stack_size(Register size, Register tmp);
-
- virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
- Register tmp,
- int offset);
-
- // Support for serializing memory accesses between threads
- void serialize_memory(Register thread, Register tmp);
-
- void verify_tlab();
-
- // Biased locking support
- // lock_reg and obj_reg must be loaded up with the appropriate values.
- // swap_reg must be rax, and is killed.
- // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
- // be killed; if not supplied, push/pop will be used internally to
- // allocate a temporary (inefficient, avoid if possible).
- // Optional slow case is for implementations (interpreter and C1) which branch to
- // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
- // Returns offset of first potentially-faulting instruction for null
- // check info (currently consumed only by C1). If
- // swap_reg_contains_mark is true then returns -1 as it is assumed
- // the calling code has already passed any potential faults.
- int biased_locking_enter(Register lock_reg, Register obj_reg,
- Register swap_reg, Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done, Label* slow_case = NULL,
- BiasedLockingCounters* counters = NULL);
- void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
-
- Condition negate_condition(Condition cond);
-
-  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
- // operands. In general the names are modified to avoid hiding the instruction in Assembler
- // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
- // here in MacroAssembler. The major exception to this rule is call
-
- // Arithmetics
-
-
- void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
- void addptr(Address dst, Register src);
-
- void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
- void addptr(Register dst, int32_t src);
- void addptr(Register dst, Register src);
- void addptr(Register dst, RegisterOrConstant src) {
- if (src.is_constant()) addptr(dst, (int) src.as_constant());
- else addptr(dst, src.as_register());
- }
-
- void andptr(Register dst, int32_t src);
- void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
-
- void cmp8(AddressLiteral src1, int imm);
-
- // renamed to drag out the casting of address to int32_t/intptr_t
- void cmp32(Register src1, int32_t imm);
-
- void cmp32(AddressLiteral src1, int32_t imm);
- // compare reg - mem, or reg - &mem
- void cmp32(Register src1, AddressLiteral src2);
-
- void cmp32(Register src1, Address src2);
-
-#ifndef _LP64
- void cmpklass(Address dst, Metadata* obj);
- void cmpklass(Register dst, Metadata* obj);
- void cmpoop(Address dst, jobject obj);
- void cmpoop(Register dst, jobject obj);
-#endif // _LP64
-
-  // NOTE: src2 must be the lval. This is NOT a mem-mem compare
- void cmpptr(Address src1, AddressLiteral src2);
-
- void cmpptr(Register src1, AddressLiteral src2);
-
- void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
- void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
- // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
-
- void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
- void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
-
-  // cmp64 to avoid hiding cmpq
- void cmp64(Register src1, AddressLiteral src);
-
- void cmpxchgptr(Register reg, Address adr);
-
- void locked_cmpxchgptr(Register reg, AddressLiteral adr);
-
-
- void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
-
-
- void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
-
- void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
-
- void shlptr(Register dst, int32_t shift);
- void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
-
- void shrptr(Register dst, int32_t shift);
- void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
-
- void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
- void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
-
- void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
-
- void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
- void subptr(Register dst, int32_t src);
- // Force generation of a 4 byte immediate value even if it fits into 8bit
- void subptr_imm32(Register dst, int32_t src);
- void subptr(Register dst, Register src);
- void subptr(Register dst, RegisterOrConstant src) {
- if (src.is_constant()) subptr(dst, (int) src.as_constant());
- else subptr(dst, src.as_register());
- }
-
- void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
- void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
-
- void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
- void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
-
- void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
-
-
-
- // Helper functions for statistics gathering.
- // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
- void cond_inc32(Condition cond, AddressLiteral counter_addr);
- // Unconditional atomic increment.
- void atomic_incl(AddressLiteral counter_addr);
-
- void lea(Register dst, AddressLiteral adr);
- void lea(Address dst, AddressLiteral adr);
- void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
-
- void leal32(Register dst, Address src) { leal(dst, src); }
-
- // Import other testl() methods from the parent class or else
- // they will be hidden by the following overriding declaration.
- using Assembler::testl;
- void testl(Register dst, AddressLiteral src);
-
- void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
- void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
- void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
-
- void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
- void testptr(Register src1, Register src2);
-
- void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
- void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
-
- // Calls
-
- void call(Label& L, relocInfo::relocType rtype);
- void call(Register entry);
-
-  // NOTE: this call transfers to the effective address of entry NOT
- // the address contained by entry. This is because this is more natural
- // for jumps/calls.
- void call(AddressLiteral entry);
-
- // Emit the CompiledIC call idiom
- void ic_call(address entry);
-
- // Jumps
-
-  // NOTE: these jumps transfer to the effective address of dst NOT
- // the address contained by dst. This is because this is more natural
- // for jumps/calls.
- void jump(AddressLiteral dst);
- void jump_cc(Condition cc, AddressLiteral dst);
-
- // 32bit can do a case table jump in one instruction but we no longer allow the base
-  // to be installed in the Address class. This jump transfers to the address
- // contained in the location described by entry (not the address of entry)
- void jump(ArrayAddress entry);
-
- // Floating
-
- void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
- void andpd(XMMRegister dst, AddressLiteral src);
-
- void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
- void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
- void andps(XMMRegister dst, AddressLiteral src);
-
- void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
- void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
- void comiss(XMMRegister dst, AddressLiteral src);
-
- void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
- void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
- void comisd(XMMRegister dst, AddressLiteral src);
-
- void fadd_s(Address src) { Assembler::fadd_s(src); }
- void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
-
- void fldcw(Address src) { Assembler::fldcw(src); }
- void fldcw(AddressLiteral src);
-
- void fld_s(int index) { Assembler::fld_s(index); }
- void fld_s(Address src) { Assembler::fld_s(src); }
- void fld_s(AddressLiteral src);
-
- void fld_d(Address src) { Assembler::fld_d(src); }
- void fld_d(AddressLiteral src);
-
- void fld_x(Address src) { Assembler::fld_x(src); }
- void fld_x(AddressLiteral src);
-
- void fmul_s(Address src) { Assembler::fmul_s(src); }
- void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
-
- void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
- void ldmxcsr(AddressLiteral src);
-
-  // compute pow(x,y) and exp(x) with x86 instructions. These don't cover
-  // all corner cases and may result in NaN, requiring a fallback to a
-  // runtime call.
- void fast_pow();
- void fast_exp();
- void increase_precision();
- void restore_precision();
-
- // computes exp(x). Fallback to runtime call included.
- void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }
- // computes pow(x,y). Fallback to runtime call included.
- void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); }
-
-private:
-
- // call runtime as a fallback for trig functions and pow/exp.
- void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);
-
- // computes 2^(Ylog2X); Ylog2X in ST(0)
- void pow_exp_core_encoding();
-
- // computes pow(x,y) or exp(x). Fallback to runtime call included.
- void pow_or_exp(bool is_exp, int num_fpu_regs_in_use);
-
- // these are private because users should be doing movflt/movdbl
-
- void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, AddressLiteral src);
-
- void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
- void movlpd(XMMRegister dst, AddressLiteral src);
-
-public:
-
- void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
- void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
- void addsd(XMMRegister dst, AddressLiteral src);
-
- void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
- void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
- void addss(XMMRegister dst, AddressLiteral src);
-
- void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
- void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
- void divsd(XMMRegister dst, AddressLiteral src);
-
- void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
- void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
- void divss(XMMRegister dst, AddressLiteral src);
-
- // Move Unaligned Double Quadword
- void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
- void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
- void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
- void movdqu(XMMRegister dst, AddressLiteral src);
-
- void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
- void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
- void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
- void movsd(XMMRegister dst, AddressLiteral src);
-
- void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
- void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
- void mulsd(XMMRegister dst, AddressLiteral src);
-
- void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
- void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
- void mulss(XMMRegister dst, AddressLiteral src);
-
- void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
- void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
- void sqrtsd(XMMRegister dst, AddressLiteral src);
-
- void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
- void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
- void sqrtss(XMMRegister dst, AddressLiteral src);
-
- void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
- void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
- void subsd(XMMRegister dst, AddressLiteral src);
-
- void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
- void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
- void subss(XMMRegister dst, AddressLiteral src);
-
- void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
- void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
- void ucomiss(XMMRegister dst, AddressLiteral src);
-
- void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
- void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
- void ucomisd(XMMRegister dst, AddressLiteral src);
-
- // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
- void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
- void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
- void xorpd(XMMRegister dst, AddressLiteral src);
-
- // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
- void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
- void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
- void xorps(XMMRegister dst, AddressLiteral src);
-
- // Shuffle Bytes
- void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
- void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
- void pshufb(XMMRegister dst, AddressLiteral src);
- // AVX 3-operands instructions
-
- void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
- void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
- void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
- void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
- void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
- void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
- void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
-
- void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
- void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
- void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
-
- void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
- void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
- void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
- void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
- void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
- void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
- void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
- void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
- void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
- void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
- void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
- void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
- void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
-
- // AVX Vector instructions
-
- void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
- void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
- void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
-
- void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
- void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
- void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
-
- void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
- if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
- Assembler::vpxor(dst, nds, src, vector256);
- else
- Assembler::vxorpd(dst, nds, src, vector256);
- }
- void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
- if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
- Assembler::vpxor(dst, nds, src, vector256);
- else
- Assembler::vxorpd(dst, nds, src, vector256);
- }
-
-  // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
- void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
- if (UseAVX > 1) // vinserti128h is available only in AVX2
- Assembler::vinserti128h(dst, nds, src);
- else
- Assembler::vinsertf128h(dst, nds, src);
- }
-
- // Data
-
- void cmov32( Condition cc, Register dst, Address src);
- void cmov32( Condition cc, Register dst, Register src);
-
- void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
-
- void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
- void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
-
- void movoop(Register dst, jobject obj);
- void movoop(Address dst, jobject obj);
-
- void mov_metadata(Register dst, Metadata* obj);
- void mov_metadata(Address dst, Metadata* obj);
-
- void movptr(ArrayAddress dst, Register src);
- // can this do an lea?
- void movptr(Register dst, ArrayAddress src);
-
- void movptr(Register dst, Address src);
-
- void movptr(Register dst, AddressLiteral src);
-
- void movptr(Register dst, intptr_t src);
- void movptr(Register dst, Register src);
- void movptr(Address dst, intptr_t src);
-
- void movptr(Address dst, Register src);
-
- void movptr(Register dst, RegisterOrConstant src) {
- if (src.is_constant()) movptr(dst, src.as_constant());
- else movptr(dst, src.as_register());
- }
-
-#ifdef _LP64
-  // Generally the next two are only used for moving NULL,
-  // although there are situations in initializing the mark word where
-  // they could be used. They are dangerous.
-
- // They only exist on LP64 so that int32_t and intptr_t are not the same
- // and we have ambiguous declarations.
-
- void movptr(Address dst, int32_t imm32);
- void movptr(Register dst, int32_t imm32);
-#endif // _LP64
-
- // to avoid hiding movl
- void mov32(AddressLiteral dst, Register src);
- void mov32(Register dst, AddressLiteral src);
-
- // to avoid hiding movb
- void movbyte(ArrayAddress dst, int src);
-
- // Import other mov() methods from the parent class or else
- // they will be hidden by the following overriding declaration.
- using Assembler::movdl;
- using Assembler::movq;
- void movdl(XMMRegister dst, AddressLiteral src);
- void movq(XMMRegister dst, AddressLiteral src);
-
- // Can push value or effective address
- void pushptr(AddressLiteral src);
-
- void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
- void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
-
- void pushoop(jobject obj);
- void pushklass(Metadata* obj);
-
-  // sign extend an l (32-bit) value to a ptr sized element as needed
- void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
- void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
-
- // C2 compiled method's prolog code.
- void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
-
- // IndexOf strings.
-  // Small strings are loaded through the stack if they cross a page boundary.
- void string_indexof(Register str1, Register str2,
- Register cnt1, Register cnt2,
- int int_cnt2, Register result,
- XMMRegister vec, Register tmp);
-
- // IndexOf for constant substrings with size >= 8 elements
- // which don't need to be loaded through stack.
- void string_indexofC8(Register str1, Register str2,
- Register cnt1, Register cnt2,
- int int_cnt2, Register result,
- XMMRegister vec, Register tmp);
-
- // Smallest code: we don't need to load through stack,
- // check string tail.
-
- // Compare strings.
- void string_compare(Register str1, Register str2,
- Register cnt1, Register cnt2, Register result,
- XMMRegister vec1);
-
- // Compare char[] arrays.
- void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
- Register limit, Register result, Register chr,
- XMMRegister vec1, XMMRegister vec2);
-
- // Fill primitive arrays
- void generate_fill(BasicType t, bool aligned,
- Register to, Register value, Register count,
- Register rtmp, XMMRegister xtmp);
-
-#undef VIRTUAL
-
-};
-
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and its
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
- ~SkipIfEqual();
-};
-
-#ifdef ASSERT
-inline bool AbstractAssembler::pd_check_instruction_mark() { return true; }
-#endif
-
#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/assembler_x86.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -28,48 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
-#include "runtime/handles.inline.hpp"
-
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
- unsigned char op = branch[0];
- assert(op == 0xE8 /* call */ ||
- op == 0xE9 /* jmp */ ||
- op == 0xEB /* short jmp */ ||
- (op & 0xF0) == 0x70 /* short jcc */ ||
- op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
- "Invalid opcode at patch point");
-
- if (op == 0xEB || (op & 0xF0) == 0x70) {
- // short offset operators (jmp and jcc)
- char* disp = (char*) &branch[1];
- int imm8 = target - (address) &disp[1];
- guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
- *disp = imm8;
- } else {
- int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
- int imm32 = target - (address) &disp[1];
- *disp = imm32;
- }
-}
-
-#ifndef PRODUCT
-inline void MacroAssembler::pd_print_patched_instruction(address branch) {
- const char* s;
- unsigned char op = branch[0];
- if (op == 0xE8) {
- s = "call";
- } else if (op == 0xE9 || op == 0xEB) {
- s = "jmp";
- } else if ((op & 0xF0) == 0x70) {
- s = "jcc";
- } else if (op == 0x0F) {
- s = "jcc";
- } else {
- s = "????";
- }
- tty->print("%s (unresolved)", s);
-}
-#endif // ndef PRODUCT
#ifndef _LP64
inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
@@ -87,12 +45,6 @@
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
-#else
-inline void Assembler::emit_long64(jlong x) {
- *(jlong*) _code_pos = x;
- _code_pos += sizeof(jlong);
- code_section()->set_end(_code_pos);
-}
#endif // _LP64
#endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"
@@ -538,9 +538,9 @@
// compute full expression stack limit
- const Address size_of_stack (rbx, Method::max_stack_offset());
const int extra_stack = 0; //6815692//Method::extra_stack_words();
- __ load_unsigned_short(rdx, size_of_stack); // get size of expression stack in words
+ __ movptr(rdx, Address(rbx, Method::const_offset()));
+ __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
__ negptr(rdx); // so we can subtract in next step
// Allocate expression stack
__ lea(rsp, Address(rsp, rdx, Address::times_ptr, -extra_stack));
@@ -682,12 +682,12 @@
const Address stack_size(thread, Thread::stack_size_offset());
// locals + overhead, in bytes
- const Address size_of_stack (rbx, Method::max_stack_offset());
- // Always give one monitor to allow us to start interp if sync method.
- // Any additional monitors need a check when moving the expression stack
- const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
- const int extra_stack = 0; //6815692//Method::extra_stack_entries();
- __ load_unsigned_short(rax, size_of_stack); // get size of expression stack in words
+ // Always give one monitor to allow us to start interp if sync method.
+ // Any additional monitors need a check when moving the expression stack
+ const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
+ const int extra_stack = 0; //6815692//Method::extra_stack_entries();
+ __ movptr(rax, Address(rbx, Method::const_offset()));
+ __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
__ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), extra_stack + one_monitor));
__ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -534,7 +534,7 @@
Method* m = *interpreter_frame_method_addr();
// validate the method we'd find in this potential sender
- if (!Universe::heap()->is_valid_method(m)) return false;
+ if (!m->is_valid_method()) return false;
// stack frames shouldn't be much larger than max_stack elements
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,8 @@
#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP
+#include "code/codeCache.hpp"
+
// Inline functions for Intel frames:
// Constructors:
--- a/hotspot/src/cpu/x86/vm/icBuffer_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/icBuffer_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
--- a/hotspot/src/cpu/x86/vm/icache_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/icache_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "runtime/icache.hpp"
#define __ _masm->
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -36,18 +36,7 @@
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// Implementation of InterpreterMacroAssembler
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,8 +25,10 @@
#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP
#define CPU_X86_VM_INTERP_MASM_X86_32_HPP
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
+#include "runtime/frame.hpp"
// This file specializes the assembler with interpreter-specific macros
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -36,18 +36,7 @@
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// Implementation of InterpreterMacroAssembler
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,8 +25,10 @@
#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP
#define CPU_X86_VM_INTERP_MASM_X86_64_HPP
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
+#include "runtime/frame.hpp"
// This file specializes the assembler with interpreter-specific macros
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
--- a/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/jniFastGetField_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,6099 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
+#include "compiler/disassembler.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#endif
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#define STOP(error) stop(error)
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#define STOP(error) block_comment(error); stop(error)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+
+#ifdef ASSERT
+bool AbstractAssembler::pd_check_instruction_mark() { return true; }
+#endif
+
+static Assembler::Condition reverse[] = {
+ Assembler::noOverflow /* overflow = 0x0 */ ,
+ Assembler::overflow /* noOverflow = 0x1 */ ,
+ Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
+ Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
+ Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
+ Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
+ Assembler::above /* belowEqual = 0x6 */ ,
+ Assembler::belowEqual /* above = 0x7 */ ,
+ Assembler::positive /* negative = 0x8 */ ,
+ Assembler::negative /* positive = 0x9 */ ,
+ Assembler::noParity /* parity = 0xa */ ,
+ Assembler::parity /* noParity = 0xb */ ,
+ Assembler::greaterEqual /* less = 0xc */ ,
+ Assembler::less /* greaterEqual = 0xd */ ,
+ Assembler::greater /* lessEqual = 0xe */ ,
+ Assembler::lessEqual /* greater = 0xf, */
+
+};
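+// Usage sketch (illustrative only): reverse[] negates a condition by indexing
+// with its encoding, assuming the encodings noted in the comments above, e.g.
+//   Assembler::Condition c1 = reverse[Assembler::zero];  // notZero      (0x4 -> 0x5)
+//   Assembler::Condition c2 = reverse[Assembler::less];  // greaterEqual (0xc -> 0xd)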
+
+
+// Implementation of MacroAssembler
+
+// First all the versions that have distinct versions depending on 32/64 bit
+// Unless the difference is trivial (1 line or so).
+
+#ifndef _LP64
+
+// 32bit versions
+
+Address MacroAssembler::as_Address(AddressLiteral adr) {
+ return Address(adr.target(), adr.rspec());
+}
+
+Address MacroAssembler::as_Address(ArrayAddress adr) {
+ return Address::make_array(adr);
+}
+
+int MacroAssembler::biased_locking_enter(Register lock_reg,
+ Register obj_reg,
+ Register swap_reg,
+ Register tmp_reg,
+ bool swap_reg_contains_mark,
+ Label& done,
+ Label* slow_case,
+ BiasedLockingCounters* counters) {
+ assert(UseBiasedLocking, "why call this otherwise?");
+ assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
+ assert_different_registers(lock_reg, obj_reg, swap_reg);
+
+ if (PrintBiasedLockingStatistics && counters == NULL)
+ counters = BiasedLocking::counters();
+
+ bool need_tmp_reg = false;
+ if (tmp_reg == noreg) {
+ need_tmp_reg = true;
+ tmp_reg = lock_reg;
+ } else {
+ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
+ }
+ assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
+ Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
+ Address saved_mark_addr(lock_reg, 0);
+
+ // Biased locking
+ // See whether the lock is currently biased toward our thread and
+ // whether the epoch is still valid
+ // Note that the runtime guarantees sufficient alignment of JavaThread
+ // pointers to allow age to be placed into low bits
+ // First check to see whether biasing is even enabled for this object
+ Label cas_label;
+ int null_check_offset = -1;
+ if (!swap_reg_contains_mark) {
+ null_check_offset = offset();
+ movl(swap_reg, mark_addr);
+ }
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ movl(tmp_reg, swap_reg);
+ andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ jcc(Assembler::notEqual, cas_label);
+ // The bias pattern is present in the object's header. Need to check
+ // whether the bias owner and the epoch are both still current.
+ // Note that because there is no current thread register on x86 we
+ // need to store off the mark word we read out of the object to
+ // avoid reloading it and needing to recheck invariants below. This
+ // store is unfortunate but it makes the overall code shorter and
+ // simpler.
+ movl(saved_mark_addr, swap_reg);
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ get_thread(tmp_reg);
+ xorl(swap_reg, tmp_reg);
+ if (swap_reg_contains_mark) {
+ null_check_offset = offset();
+ }
+ movl(tmp_reg, klass_addr);
+ xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
+ andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->biased_lock_entry_count_addr()));
+ }
+ jcc(Assembler::equal, done);
+
+ Label try_revoke_bias;
+ Label try_rebias;
+
+ // At this point we know that the header has the bias pattern and
+ // that we are not the bias owner in the current epoch. We need to
+ // figure out more details about the state of the header in order to
+ // know what operations can be legally performed on the object's
+ // header.
+
+ // If the low three bits in the xor result aren't clear, that means
+ // the prototype header is no longer biased and we have to revoke
+ // the bias on this object.
+ testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
+ jcc(Assembler::notZero, try_revoke_bias);
+
+ // Biasing is still enabled for this data type. See whether the
+ // epoch of the current bias is still valid, meaning that the epoch
+ // bits of the mark word are equal to the epoch bits of the
+ // prototype header. (Note that the prototype header's epoch bits
+ // only change at a safepoint.) If not, attempt to rebias the object
+ // toward the current thread. Note that we must be absolutely sure
+ // that the current epoch is invalid in order to do this because
+ // otherwise the manipulations it performs on the mark word are
+ // illegal.
+ testl(swap_reg, markOopDesc::epoch_mask_in_place);
+ jcc(Assembler::notZero, try_rebias);
+
+ // The epoch of the current bias is still valid but we know nothing
+ // about the owner; it might be set or it might be clear. Try to
+ // acquire the bias of the object using an atomic operation. If this
+ // fails we will go in to the runtime to revoke the object's bias.
+ // Note that we first construct the presumed unbiased header so we
+ // don't accidentally blow away another thread's valid bias.
+ movl(swap_reg, saved_mark_addr);
+ andl(swap_reg,
+ markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ get_thread(tmp_reg);
+ orl(tmp_reg, swap_reg);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgptr(tmp_reg, Address(obj_reg, 0));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ // If the biasing toward our thread failed, this means that
+ // another thread succeeded in biasing it toward itself and we
+ // need to revoke that bias. The revocation will occur in the
+ // interpreter runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
+
+ bind(try_rebias);
+ // At this point we know the epoch has expired, meaning that the
+ // current "bias owner", if any, is actually invalid. Under these
+ // circumstances _only_, we are allowed to use the current header's
+ // value as the comparison value when doing the cas to acquire the
+ // bias in the current epoch. In other words, we allow transfer of
+ // the bias from one thread to another directly in this situation.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ get_thread(tmp_reg);
+ movl(swap_reg, klass_addr);
+ orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
+ movl(swap_reg, saved_mark_addr);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgptr(tmp_reg, Address(obj_reg, 0));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ // If the biasing toward our thread failed, then another thread
+ // succeeded in biasing it toward itself and we need to revoke that
+ // bias. The revocation will occur in the runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
+
+ bind(try_revoke_bias);
+ // The prototype mark in the klass doesn't have the bias bit set any
+ // more, indicating that objects of this data type are not supposed
+ // to be biased any more. We are going to try to reset the mark of
+ // this object to the prototype value and fall through to the
+ // CAS-based locking scheme. Note that if our CAS fails, it means
+ // that another thread raced us for the privilege of revoking the
+ // bias of this particular object, so it's okay to continue in the
+ // normal locking code.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ movl(swap_reg, saved_mark_addr);
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ movl(tmp_reg, klass_addr);
+ movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgptr(tmp_reg, Address(obj_reg, 0));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ // Fall through to the normal CAS-based lock, because no matter what
+ // the result of the above CAS, some thread must have succeeded in
+ // removing the bias bit from the object's header.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
+ }
+
+ bind(cas_label);
+
+ return null_check_offset;
+}
+void MacroAssembler::call_VM_leaf_base(address entry_point,
+ int number_of_arguments) {
+ call(RuntimeAddress(entry_point));
+ increment(rsp, number_of_arguments * wordSize);
+}
+
+void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
+ cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
+ cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::cmpoop(Address src1, jobject obj) {
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::cmpoop(Register src1, jobject obj) {
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::extend_sign(Register hi, Register lo) {
+ // According to Intel Doc. AP-526, "Integer Divide", p.18.
+ if (VM_Version::is_P6() && hi == rdx && lo == rax) {
+ cdql();
+ } else {
+ movl(hi, lo);
+ sarl(hi, 31);
+ }
+}
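+// Minimal sketch of what extend_sign() computes (illustrative, assuming 32-bit
+// two's-complement values): the value in lo is sign-extended into the hi:lo
+// pair, exactly as cdq extends eax into edx:eax.
+//   int32_t lo = ...;
+//   int32_t hi = lo >> 31;   // all ones if lo is negative, zero otherwise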
+
+void MacroAssembler::jC2(Register tmp, Label& L) {
+ // set parity bit if FPU flag C2 is set (via rax)
+ save_rax(tmp);
+ fwait(); fnstsw_ax();
+ sahf();
+ restore_rax(tmp);
+ // branch
+ jcc(Assembler::parity, L);
+}
+
+void MacroAssembler::jnC2(Register tmp, Label& L) {
+ // set parity bit if FPU flag C2 is set (via rax)
+ save_rax(tmp);
+ fwait(); fnstsw_ax();
+ sahf();
+ restore_rax(tmp);
+ // branch
+ jcc(Assembler::noParity, L);
+}
+
+// 32bit can do a case table jump in one instruction but we no longer allow the base
+// to be installed in the Address class
+void MacroAssembler::jump(ArrayAddress entry) {
+ jmp(as_Address(entry));
+}
+
+// Note: y_lo will be destroyed
+void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
+ // Long compare for Java (semantics as described in JVM spec.)
+ Label high, low, done;
+
+ cmpl(x_hi, y_hi);
+ jcc(Assembler::less, low);
+ jcc(Assembler::greater, high);
+ // x_hi is the return register
+ xorl(x_hi, x_hi);
+ cmpl(x_lo, y_lo);
+ jcc(Assembler::below, low);
+ jcc(Assembler::equal, done);
+
+ bind(high);
+ xorl(x_hi, x_hi);
+ increment(x_hi);
+ jmp(done);
+
+ bind(low);
+ xorl(x_hi, x_hi);
+ decrementl(x_hi);
+
+ bind(done);
+}
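+// Sketch of the Java lcmp semantics implemented above (illustrative only):
+// compare two signed 64-bit values held as hi:lo pairs and leave -1, 0 or 1
+// in x_hi, as the JVM lcmp bytecode requires.
+//   int lcmp(int64_t x, int64_t y) { return x < y ? -1 : (x > y ? 1 : 0); }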
+
+void MacroAssembler::lea(Register dst, AddressLiteral src) {
+ mov_literal32(dst, (int32_t)src.target(), src.rspec());
+}
+
+void MacroAssembler::lea(Address dst, AddressLiteral adr) {
+ // leal(dst, as_Address(adr));
+ // see note in movl as to why we must use a move
+ mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
+}
+
+void MacroAssembler::leave() {
+ mov(rsp, rbp);
+ pop(rbp);
+}
+
+void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
+ // Multiplication of two Java long values stored on the stack
+ // as illustrated below. Result is in rdx:rax.
+ //
+ // rsp ---> [ ?? ] \ \
+ // .... | y_rsp_offset |
+ // [ y_lo ] / (in bytes) | x_rsp_offset
+ // [ y_hi ] | (in bytes)
+ // .... |
+ // [ x_lo ] /
+ // [ x_hi ]
+ // ....
+ //
+ // Basic idea: lo(result) = lo(x_lo * y_lo)
+ // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
+ Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
+ Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
+ Label quick;
+ // load x_hi, y_hi and check if quick
+ // multiplication is possible
+ movl(rbx, x_hi);
+ movl(rcx, y_hi);
+ movl(rax, rbx);
+ orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0
+ jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply
+ // do full multiplication
+ // 1st step
+ mull(y_lo); // x_hi * y_lo
+ movl(rbx, rax); // save lo(x_hi * y_lo) in rbx,
+ // 2nd step
+ movl(rax, x_lo);
+ mull(rcx); // x_lo * y_hi
+ addl(rbx, rax); // add lo(x_lo * y_hi) to rbx,
+ // 3rd step
+ bind(quick); // note: rbx, = 0 if quick multiply!
+ movl(rax, x_lo);
+ mull(y_lo); // x_lo * y_lo
+ addl(rdx, rbx); // correct hi(x_lo * y_lo)
+}
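+// Worked sketch of the decomposition used by lmul() (illustrative, assuming
+// uint32_t halves x_hi:x_lo and y_hi:y_lo of the two operands):
+//   uint64_t p  = (uint64_t)x_lo * y_lo;
+//   uint32_t lo = (uint32_t)p;
+//   uint32_t hi = (uint32_t)(p >> 32) + x_hi * y_lo + x_lo * y_hi;
+// i.e. hi(result) = hi(x_lo*y_lo) + lo(x_hi*y_lo) + lo(x_lo*y_hi) mod 2^32,
+// which is what the "basic idea" comment above describes.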
+
+void MacroAssembler::lneg(Register hi, Register lo) {
+ negl(lo);
+ adcl(hi, 0);
+ negl(hi);
+}
+
+void MacroAssembler::lshl(Register hi, Register lo) {
+ // Java shift left long support (semantics as described in JVM spec., p.305)
+ // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
+ // shift value is in rcx !
+ assert(hi != rcx, "must not use rcx");
+ assert(lo != rcx, "must not use rcx");
+ const Register s = rcx; // shift count
+ const int n = BitsPerWord;
+ Label L;
+ andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
+ cmpl(s, n); // if (s < n)
+ jcc(Assembler::less, L); // else (s >= n)
+ movl(hi, lo); // x := x << n
+ xorl(lo, lo);
+ // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
+ bind(L); // s (mod n) < n
+ shldl(hi, lo); // x := x << s
+ shll(lo);
+}
+
+
+void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
+ // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
+ // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
+ assert(hi != rcx, "must not use rcx");
+ assert(lo != rcx, "must not use rcx");
+ const Register s = rcx; // shift count
+ const int n = BitsPerWord;
+ Label L;
+ andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
+ cmpl(s, n); // if (s < n)
+ jcc(Assembler::less, L); // else (s >= n)
+ movl(lo, hi); // x := x >> n
+ if (sign_extension) sarl(hi, 31);
+ else xorl(hi, hi);
+ // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
+ bind(L); // s (mod n) < n
+ shrdl(lo, hi); // x := x >> s
+ if (sign_extension) sarl(hi);
+ else shrl(hi);
+}
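+// Sketch of the double-word shift these helpers implement (illustrative only,
+// assuming uint32_t halves and a count s already masked to the range 0..63):
+//   if (s >= 32)     { hi = lo << (s - 32); lo = 0; }                  // x << s
+//   else if (s != 0) { hi = (hi << s) | (lo >> (32 - s)); lo <<= s; }
+// The s != 0 guard only avoids an undefined C shift; the emitted shld/shll
+// sequence needs no such guard since the hardware masks the count mod 32.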
+
+void MacroAssembler::movoop(Register dst, jobject obj) {
+ mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::movoop(Address dst, jobject obj) {
+ mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
+ mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
+ mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+ if (src.is_lval()) {
+ mov_literal32(dst, (intptr_t)src.target(), src.rspec());
+ } else {
+ movl(dst, as_Address(src));
+ }
+}
+
+void MacroAssembler::movptr(ArrayAddress dst, Register src) {
+ movl(as_Address(dst), src);
+}
+
+void MacroAssembler::movptr(Register dst, ArrayAddress src) {
+ movl(dst, as_Address(src));
+}
+
+// src should NEVER be a real pointer. Use AddressLiteral for true pointers
+void MacroAssembler::movptr(Address dst, intptr_t src) {
+ movl(dst, src);
+}
+
+
+void MacroAssembler::pop_callee_saved_registers() {
+ pop(rcx);
+ pop(rdx);
+ pop(rdi);
+ pop(rsi);
+}
+
+void MacroAssembler::pop_fTOS() {
+ fld_d(Address(rsp, 0));
+ addl(rsp, 2 * wordSize);
+}
+
+void MacroAssembler::push_callee_saved_registers() {
+ push(rsi);
+ push(rdi);
+ push(rdx);
+ push(rcx);
+}
+
+void MacroAssembler::push_fTOS() {
+ subl(rsp, 2 * wordSize);
+ fstp_d(Address(rsp, 0));
+}
+
+
+void MacroAssembler::pushoop(jobject obj) {
+ push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::pushklass(Metadata* obj) {
+ push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::pushptr(AddressLiteral src) {
+ if (src.is_lval()) {
+ push_literal32((int32_t)src.target(), src.rspec());
+ } else {
+ pushl(as_Address(src));
+ }
+}
+
+void MacroAssembler::set_word_if_not_zero(Register dst) {
+ xorl(dst, dst);
+ set_byte_if_not_zero(dst);
+}
+
+static void pass_arg0(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
+}
+
+static void pass_arg1(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
+}
+
+static void pass_arg2(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
+}
+
+static void pass_arg3(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
+}
+
+#ifndef PRODUCT
+extern "C" void findpc(intptr_t x);
+#endif
+
+void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
+ // In order to get locks to work, we need to fake an in_VM state
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+ if (ShowMessageBoxOnError) {
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ ttyLocker ttyl;
+ BytecodeCounter::print();
+ }
+ // To see where a verify_oop failed, get $ebx+40/X for this frame.
+ // This is the value of eip which points to where verify_oop will return.
+ if (os::message_box(msg, "Execution stopped, print registers?")) {
+ print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
+ BREAKPOINT;
+ }
+ } else {
+ ttyLocker ttyl;
+ ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
+ }
+ // Don't assert holding the ttyLock
+ assert(false, err_msg("DEBUG MESSAGE: %s", msg));
+ ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
+}
+
+void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
+ ttyLocker ttyl;
+ FlagSetting fs(Debugging, true);
+ tty->print_cr("eip = 0x%08x", eip);
+#ifndef PRODUCT
+ if ((WizardMode || Verbose) && PrintMiscellaneous) {
+ tty->cr();
+ findpc(eip);
+ tty->cr();
+ }
+#endif
+#define PRINT_REG(rax) \
+ { tty->print("%s = ", #rax); os::print_location(tty, rax); }
+ PRINT_REG(rax);
+ PRINT_REG(rbx);
+ PRINT_REG(rcx);
+ PRINT_REG(rdx);
+ PRINT_REG(rdi);
+ PRINT_REG(rsi);
+ PRINT_REG(rbp);
+ PRINT_REG(rsp);
+#undef PRINT_REG
+ // Print some words near top of stack.
+ int* dump_sp = (int*) rsp;
+ for (int col1 = 0; col1 < 8; col1++) {
+ tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
+ os::print_location(tty, *dump_sp++);
+ }
+ for (int row = 0; row < 16; row++) {
+ tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
+ for (int col = 0; col < 8; col++) {
+ tty->print(" 0x%08x", *dump_sp++);
+ }
+ tty->cr();
+ }
+ // Print some instructions around pc:
+ Disassembler::decode((address)eip-64, (address)eip);
+ tty->print_cr("--------");
+ Disassembler::decode((address)eip, (address)eip+32);
+}
+
+void MacroAssembler::stop(const char* msg) {
+ ExternalAddress message((address)msg);
+ // push address of message
+ pushptr(message.addr());
+ { Label L; call(L, relocInfo::none); bind(L); } // push eip
+ pusha(); // push registers
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
+ hlt();
+}
+
+void MacroAssembler::warn(const char* msg) {
+ push_CPU_state();
+
+ ExternalAddress message((address) msg);
+ // push address of message
+ pushptr(message.addr());
+
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
+ addl(rsp, wordSize); // discard argument
+ pop_CPU_state();
+}
+
+void MacroAssembler::print_state() {
+ { Label L; call(L, relocInfo::none); bind(L); } // push eip
+ pusha(); // push registers
+
+ push_CPU_state();
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
+ pop_CPU_state();
+
+ popa();
+ addl(rsp, wordSize);
+}
+
+#else // _LP64
+
+// 64 bit versions
+
+Address MacroAssembler::as_Address(AddressLiteral adr) {
+ // amd64 always does this as a pc-rel
+ // we can be absolute or disp based on the instruction type
+ // jmp/call are displacements others are absolute
+ assert(!adr.is_lval(), "must be rval");
+ assert(reachable(adr), "must be");
+ return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
+
+}
+
+Address MacroAssembler::as_Address(ArrayAddress adr) {
+ AddressLiteral base = adr.base();
+ lea(rscratch1, base);
+ Address index = adr.index();
+ assert(index._disp == 0, "must not have disp"); // maybe it can?
+ Address array(rscratch1, index._index, index._scale, index._disp);
+ return array;
+}
+
+int MacroAssembler::biased_locking_enter(Register lock_reg,
+ Register obj_reg,
+ Register swap_reg,
+ Register tmp_reg,
+ bool swap_reg_contains_mark,
+ Label& done,
+ Label* slow_case,
+ BiasedLockingCounters* counters) {
+ assert(UseBiasedLocking, "why call this otherwise?");
+ assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
+ assert(tmp_reg != noreg, "tmp_reg must be supplied");
+ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
+ assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
+ Address saved_mark_addr(lock_reg, 0);
+
+ if (PrintBiasedLockingStatistics && counters == NULL)
+ counters = BiasedLocking::counters();
+
+ // Biased locking
+ // See whether the lock is currently biased toward our thread and
+ // whether the epoch is still valid
+ // Note that the runtime guarantees sufficient alignment of JavaThread
+ // pointers to allow age to be placed into low bits
+ // First check to see whether biasing is even enabled for this object
+ Label cas_label;
+ int null_check_offset = -1;
+ if (!swap_reg_contains_mark) {
+ null_check_offset = offset();
+ movq(swap_reg, mark_addr);
+ }
+ movq(tmp_reg, swap_reg);
+ andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
+ jcc(Assembler::notEqual, cas_label);
+ // The bias pattern is present in the object's header. Need to check
+ // whether the bias owner and the epoch are both still current.
+ load_prototype_header(tmp_reg, obj_reg);
+ orq(tmp_reg, r15_thread);
+ xorq(tmp_reg, swap_reg);
+ andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+ }
+ jcc(Assembler::equal, done);
+
+ Label try_revoke_bias;
+ Label try_rebias;
+
+ // At this point we know that the header has the bias pattern and
+ // that we are not the bias owner in the current epoch. We need to
+ // figure out more details about the state of the header in order to
+ // know what operations can be legally performed on the object's
+ // header.
+
+ // If the low three bits in the xor result aren't clear, that means
+ // the prototype header is no longer biased and we have to revoke
+ // the bias on this object.
+ testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ jcc(Assembler::notZero, try_revoke_bias);
+
+ // Biasing is still enabled for this data type. See whether the
+ // epoch of the current bias is still valid, meaning that the epoch
+ // bits of the mark word are equal to the epoch bits of the
+ // prototype header. (Note that the prototype header's epoch bits
+ // only change at a safepoint.) If not, attempt to rebias the object
+ // toward the current thread. Note that we must be absolutely sure
+ // that the current epoch is invalid in order to do this because
+ // otherwise the manipulations it performs on the mark word are
+ // illegal.
+ testq(tmp_reg, markOopDesc::epoch_mask_in_place);
+ jcc(Assembler::notZero, try_rebias);
+
+ // The epoch of the current bias is still valid but we know nothing
+ // about the owner; it might be set or it might be clear. Try to
+ // acquire the bias of the object using an atomic operation. If this
+ // fails we will go in to the runtime to revoke the object's bias.
+ // Note that we first construct the presumed unbiased header so we
+ // don't accidentally blow away another thread's valid bias.
+ andq(swap_reg,
+ markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ movq(tmp_reg, swap_reg);
+ orq(tmp_reg, r15_thread);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgq(tmp_reg, Address(obj_reg, 0));
+ // If the biasing toward our thread failed, this means that
+ // another thread succeeded in biasing it toward itself and we
+ // need to revoke that bias. The revocation will occur in the
+ // interpreter runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
+
+ bind(try_rebias);
+ // At this point we know the epoch has expired, meaning that the
+ // current "bias owner", if any, is actually invalid. Under these
+ // circumstances _only_, we are allowed to use the current header's
+ // value as the comparison value when doing the cas to acquire the
+ // bias in the current epoch. In other words, we allow transfer of
+ // the bias from one thread to another directly in this situation.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ load_prototype_header(tmp_reg, obj_reg);
+ orq(tmp_reg, r15_thread);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgq(tmp_reg, Address(obj_reg, 0));
+ // If the biasing toward our thread failed, then another thread
+ // succeeded in biasing it toward itself and we need to revoke that
+ // bias. The revocation will occur in the runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
+
+ bind(try_revoke_bias);
+ // The prototype mark in the klass doesn't have the bias bit set any
+ // more, indicating that objects of this data type are not supposed
+ // to be biased any more. We are going to try to reset the mark of
+ // this object to the prototype value and fall through to the
+ // CAS-based locking scheme. Note that if our CAS fails, it means
+ // that another thread raced us for the privilege of revoking the
+ // bias of this particular object, so it's okay to continue in the
+ // normal locking code.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ load_prototype_header(tmp_reg, obj_reg);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgq(tmp_reg, Address(obj_reg, 0));
+ // Fall through to the normal CAS-based lock, because no matter what
+ // the result of the above CAS, some thread must have succeeded in
+ // removing the bias bit from the object's header.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
+ }
+
+ bind(cas_label);
+
+ return null_check_offset;
+}
+
+void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
+ Label L, E;
+
+#ifdef _WIN64
+ // Windows always allocates space for its register args
+ assert(num_args <= 4, "only register arguments supported");
+ subq(rsp, frame::arg_reg_save_area_bytes);
+#endif
+
+ // Align stack if necessary
+ testl(rsp, 15);
+ jcc(Assembler::zero, L);
+
+ subq(rsp, 8);
+ {
+ call(RuntimeAddress(entry_point));
+ }
+ addq(rsp, 8);
+ jmp(E);
+
+ bind(L);
+ {
+ call(RuntimeAddress(entry_point));
+ }
+
+ bind(E);
+
+#ifdef _WIN64
+ // restore stack pointer
+ addq(rsp, frame::arg_reg_save_area_bytes);
+#endif
+
+}
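+// Alignment sketch (illustrative): the testl/jcc above just selects one of two
+// call paths so that rsp is 16-byte aligned when the call is made, as the ABI
+// expects. In pseudo-C:
+//   if (rsp & 15) { rsp -= 8; call(entry_point); rsp += 8; }
+//   else          { call(entry_point); }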
+
+void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
+ assert(!src2.is_lval(), "should use cmpptr");
+
+ if (reachable(src2)) {
+ cmpq(src1, as_Address(src2));
+ } else {
+ lea(rscratch1, src2);
+ Assembler::cmpq(src1, Address(rscratch1, 0));
+ }
+}
+
+int MacroAssembler::corrected_idivq(Register reg) {
+ // Full implementation of Java ldiv and lrem; checks for special
+ // case as described in JVM spec., p.243 & p.271. The function
+ // returns the (pc) offset of the idivl instruction - may be needed
+ // for implicit exceptions.
+ //
+ // normal case special case
+ //
+ // input : rax: dividend min_long
+ // reg: divisor (may not be eax/edx) -1
+ //
+ // output: rax: quotient (= rax idiv reg) min_long
+ // rdx: remainder (= rax irem reg) 0
+ assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
+ static const int64_t min_long = 0x8000000000000000;
+ Label normal_case, special_case;
+
+ // check for special case
+ cmp64(rax, ExternalAddress((address) &min_long));
+ jcc(Assembler::notEqual, normal_case);
+ xorl(rdx, rdx); // prepare rdx for possible special case (where
+ // remainder = 0)
+ cmpq(reg, -1);
+ jcc(Assembler::equal, special_case);
+
+ // handle normal case
+ bind(normal_case);
+ cdqq();
+ int idivq_offset = offset();
+ idivq(reg);
+
+ // normal and special case exit
+ bind(special_case);
+
+ return idivq_offset;
+}
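+// Sketch of the special case handled above (illustrative only): idiv of
+// min_long by -1 overflows and traps on x86, so Java ldiv/lrem pin the result:
+//   if (dividend == INT64_MIN && divisor == -1) { quot = INT64_MIN; rem = 0; }
+//   else { quot = dividend / divisor; rem = dividend % divisor; }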
+
+void MacroAssembler::decrementq(Register reg, int value) {
+ if (value == min_jint) { subq(reg, value); return; }
+ if (value < 0) { incrementq(reg, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { decq(reg) ; return; }
+ /* else */ { subq(reg, value) ; return; }
+}
+
+void MacroAssembler::decrementq(Address dst, int value) {
+ if (value == min_jint) { subq(dst, value); return; }
+ if (value < 0) { incrementq(dst, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { decq(dst) ; return; }
+ /* else */ { subq(dst, value) ; return; }
+}
+
+void MacroAssembler::incrementq(Register reg, int value) {
+ if (value == min_jint) { addq(reg, value); return; }
+ if (value < 0) { decrementq(reg, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { incq(reg) ; return; }
+ /* else */ { addq(reg, value) ; return; }
+}
+
+void MacroAssembler::incrementq(Address dst, int value) {
+ if (value == min_jint) { addq(dst, value); return; }
+ if (value < 0) { decrementq(dst, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { incq(dst) ; return; }
+ /* else */ { addq(dst, value) ; return; }
+}
+
+// 32bit can do a case table jump in one instruction but we no longer allow the base
+// to be installed in the Address class
+void MacroAssembler::jump(ArrayAddress entry) {
+ lea(rscratch1, entry.base());
+ Address dispatch = entry.index();
+ assert(dispatch._base == noreg, "must be");
+ dispatch._base = rscratch1;
+ jmp(dispatch);
+}
+
+void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
+ ShouldNotReachHere(); // 64bit doesn't use two regs
+ cmpq(x_lo, y_lo);
+}
+
+void MacroAssembler::lea(Register dst, AddressLiteral src) {
+ mov_literal64(dst, (intptr_t)src.target(), src.rspec());
+}
+
+void MacroAssembler::lea(Address dst, AddressLiteral adr) {
+ mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
+ movptr(dst, rscratch1);
+}
+
+void MacroAssembler::leave() {
+ // %%% is this really better? Why not on 32bit too?
+ emit_byte(0xC9); // LEAVE
+}
+
+void MacroAssembler::lneg(Register hi, Register lo) {
+ ShouldNotReachHere(); // 64bit doesn't use two regs
+ negq(lo);
+}
+
+void MacroAssembler::movoop(Register dst, jobject obj) {
+ mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::movoop(Address dst, jobject obj) {
+ mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
+ movq(dst, rscratch1);
+}
+
+void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
+ mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
+ mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
+ movq(dst, rscratch1);
+}
+
+void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+ if (src.is_lval()) {
+ mov_literal64(dst, (intptr_t)src.target(), src.rspec());
+ } else {
+ if (reachable(src)) {
+ movq(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movq(dst, Address(rscratch1,0));
+ }
+ }
+}
+
+void MacroAssembler::movptr(ArrayAddress dst, Register src) {
+ movq(as_Address(dst), src);
+}
+
+void MacroAssembler::movptr(Register dst, ArrayAddress src) {
+ movq(dst, as_Address(src));
+}
+
+// src should NEVER be a real pointer. Use AddressLiteral for true pointers
+void MacroAssembler::movptr(Address dst, intptr_t src) {
+ mov64(rscratch1, src);
+ movq(dst, rscratch1);
+}
+
+// These are mostly for initializing NULL
+void MacroAssembler::movptr(Address dst, int32_t src) {
+ movslq(dst, src);
+}
+
+void MacroAssembler::movptr(Register dst, int32_t src) {
+ mov64(dst, (intptr_t)src);
+}
+
+void MacroAssembler::pushoop(jobject obj) {
+ movoop(rscratch1, obj);
+ push(rscratch1);
+}
+
+void MacroAssembler::pushklass(Metadata* obj) {
+ mov_metadata(rscratch1, obj);
+ push(rscratch1);
+}
+
+void MacroAssembler::pushptr(AddressLiteral src) {
+ lea(rscratch1, src);
+ if (src.is_lval()) {
+ push(rscratch1);
+ } else {
+ pushq(Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::reset_last_Java_frame(bool clear_fp,
+ bool clear_pc) {
+ // we must set sp to zero to clear frame
+ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
+ // must clear fp, so that compiled frames are not confused; it is
+ // possible that we need it only for debugging
+ if (clear_fp) {
+ movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
+ }
+
+ if (clear_pc) {
+ movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
+ }
+}
+
+void MacroAssembler::set_last_Java_frame(Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc) {
+ // determine last_java_sp register
+ if (!last_java_sp->is_valid()) {
+ last_java_sp = rsp;
+ }
+
+ // last_java_fp is optional
+ if (last_java_fp->is_valid()) {
+ movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
+ last_java_fp);
+ }
+
+ // last_java_pc is optional
+ if (last_java_pc != NULL) {
+ Address java_pc(r15_thread,
+ JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
+ lea(rscratch1, InternalAddress(last_java_pc));
+ movptr(java_pc, rscratch1);
+ }
+
+ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
+}
+
+static void pass_arg0(MacroAssembler* masm, Register arg) {
+ if (c_rarg0 != arg ) {
+ masm->mov(c_rarg0, arg);
+ }
+}
+
+static void pass_arg1(MacroAssembler* masm, Register arg) {
+ if (c_rarg1 != arg ) {
+ masm->mov(c_rarg1, arg);
+ }
+}
+
+static void pass_arg2(MacroAssembler* masm, Register arg) {
+ if (c_rarg2 != arg ) {
+ masm->mov(c_rarg2, arg);
+ }
+}
+
+static void pass_arg3(MacroAssembler* masm, Register arg) {
+ if (c_rarg3 != arg ) {
+ masm->mov(c_rarg3, arg);
+ }
+}
+
+void MacroAssembler::stop(const char* msg) {
+ address rip = pc();
+ pusha(); // get regs on stack
+ lea(c_rarg0, ExternalAddress((address) msg));
+ lea(c_rarg1, InternalAddress(rip));
+ movq(c_rarg2, rsp); // pass pointer to regs array
+ andq(rsp, -16); // align stack as required by ABI
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
+ hlt();
+}
+
+void MacroAssembler::warn(const char* msg) {
+ push(rbp);
+ movq(rbp, rsp);
+ andq(rsp, -16); // align stack as required by push_CPU_state and call
+ push_CPU_state(); // keeps alignment at 16 bytes
+ lea(c_rarg0, ExternalAddress((address) msg));
+ call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
+ pop_CPU_state();
+ mov(rsp, rbp);
+ pop(rbp);
+}
+
+void MacroAssembler::print_state() {
+ address rip = pc();
+ pusha(); // get regs on stack
+ push(rbp);
+ movq(rbp, rsp);
+ andq(rsp, -16); // align stack as required by push_CPU_state and call
+ push_CPU_state(); // keeps alignment at 16 bytes
+
+ lea(c_rarg0, InternalAddress(rip));
+ lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
+ call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
+
+ pop_CPU_state();
+ mov(rsp, rbp);
+ pop(rbp);
+ popa();
+}
+
+#ifndef PRODUCT
+extern "C" void findpc(intptr_t x);
+#endif
+
+void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
+ // In order to get locks to work, we need to fake an in_VM state
+ if (ShowMessageBoxOnError) {
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+#ifndef PRODUCT
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ ttyLocker ttyl;
+ BytecodeCounter::print();
+ }
+#endif
+ // To see where a verify_oop failed, get $ebx+40/X for this frame.
+ // XXX correct this offset for amd64
+ // This is the value of eip which points to where verify_oop will return.
+ if (os::message_box(msg, "Execution stopped, print registers?")) {
+ print_state64(pc, regs);
+ BREAKPOINT;
+ assert(false, "start up GDB");
+ }
+ ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
+ } else {
+ ttyLocker ttyl;
+ ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
+ msg);
+ assert(false, err_msg("DEBUG MESSAGE: %s", msg));
+ }
+}
+
+void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
+ ttyLocker ttyl;
+ FlagSetting fs(Debugging, true);
+ tty->print_cr("rip = 0x%016lx", pc);
+#ifndef PRODUCT
+ tty->cr();
+ findpc(pc);
+ tty->cr();
+#endif
+#define PRINT_REG(rax, value) \
+ { tty->print("%s = ", #rax); os::print_location(tty, value); }
+ PRINT_REG(rax, regs[15]);
+ PRINT_REG(rbx, regs[12]);
+ PRINT_REG(rcx, regs[14]);
+ PRINT_REG(rdx, regs[13]);
+ PRINT_REG(rdi, regs[8]);
+ PRINT_REG(rsi, regs[9]);
+ PRINT_REG(rbp, regs[10]);
+ PRINT_REG(rsp, regs[11]);
+ PRINT_REG(r8 , regs[7]);
+ PRINT_REG(r9 , regs[6]);
+ PRINT_REG(r10, regs[5]);
+ PRINT_REG(r11, regs[4]);
+ PRINT_REG(r12, regs[3]);
+ PRINT_REG(r13, regs[2]);
+ PRINT_REG(r14, regs[1]);
+ PRINT_REG(r15, regs[0]);
+#undef PRINT_REG
+ // Print some words near top of stack.
+ int64_t* rsp = (int64_t*) regs[11];
+ int64_t* dump_sp = rsp;
+ for (int col1 = 0; col1 < 8; col1++) {
+ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
+ os::print_location(tty, *dump_sp++);
+ }
+ for (int row = 0; row < 25; row++) {
+ tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
+ for (int col = 0; col < 4; col++) {
+ tty->print(" 0x%016lx", *dump_sp++);
+ }
+ tty->cr();
+ }
+ // Print some instructions around pc:
+ Disassembler::decode((address)pc-64, (address)pc);
+ tty->print_cr("--------");
+ Disassembler::decode((address)pc, (address)pc+32);
+}
+
+#endif // _LP64
+
+// Now versions that are common to 32/64 bit
+
+void MacroAssembler::addptr(Register dst, int32_t imm32) {
+ LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
+}
+
+void MacroAssembler::addptr(Register dst, Register src) {
+ LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
+}
+
+void MacroAssembler::addptr(Address dst, Register src) {
+ LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
+}
+
+void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::addsd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::addsd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ addss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ addss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::align(int modulus) {
+ if (offset() % modulus != 0) {
+ nop(modulus - (offset() % modulus));
+ }
+}
+
+void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
+ // Used in sign-masking with aligned address.
+ assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+ if (reachable(src)) {
+ Assembler::andpd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::andpd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
+ // Used in sign-masking with aligned address.
+ assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+ if (reachable(src)) {
+ Assembler::andps(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::andps(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::andptr(Register dst, int32_t imm32) {
+ LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
+}
+
+void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
+ pushf();
+ if (os::is_MP())
+ lock();
+ incrementl(counter_addr);
+ popf();
+}
+
+// Writes to stack successive pages until offset reached to check for
+// stack overflow + shadow pages. This clobbers tmp.
+void MacroAssembler::bang_stack_size(Register size, Register tmp) {
+ movptr(tmp, rsp);
+ // Bang stack for total size given plus shadow page size.
+ // Bang one page at a time because large size can bang beyond yellow and
+ // red zones.
+ Label loop;
+ bind(loop);
+ movl(Address(tmp, (-os::vm_page_size())), size );
+ subptr(tmp, os::vm_page_size());
+ subl(size, os::vm_page_size());
+ jcc(Assembler::greater, loop);
+
+ // Bang down shadow pages too.
+ // The -1 because we already subtracted 1 page.
+ for (int i = 0; i< StackShadowPages-1; i++) {
+ // this could be any sized move but this can be a debugging crumb
+ // so the bigger the better.
+ movptr(Address(tmp, (-i*os::vm_page_size())), size );
+ }
+}
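+// Banging sketch (illustrative): conceptually every page from rsp down through
+// size plus the shadow area gets one store, so a stack overflow is detected
+// here rather than somewhere inside the new frame:
+//   for (char* p = sp; p > sp - size - shadow_bytes; p -= page_size) *p = 0;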
+
+void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
+ assert(UseBiasedLocking, "why call this otherwise?");
+
+ // Check for biased locking unlock case, which is a no-op
+ // Note: we do not have to check the thread ID for two reasons.
+ // First, the interpreter checks for IllegalMonitorStateException at
+ // a higher level. Second, if the bias was revoked while we held the
+ // lock, the object could not be rebiased toward another thread, so
+ // the bias bit would be clear.
+ movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
+ cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
+ jcc(Assembler::equal, done);
+}
+
+void MacroAssembler::c2bool(Register x) {
+ // implements x == 0 ? 0 : 1
+ // note: must only look at least-significant byte of x
+ // since C-style booleans are stored in one byte
+ // only! (was bug)
+ andl(x, 0xFF);
+ setb(Assembler::notZero, x);
+}
+
+// Wouldn't need if AddressLiteral version had new name
+void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
+ Assembler::call(L, rtype);
+}
+
+void MacroAssembler::call(Register entry) {
+ Assembler::call(entry);
+}
+
+void MacroAssembler::call(AddressLiteral entry) {
+ if (reachable(entry)) {
+ Assembler::call_literal(entry.target(), entry.rspec());
+ } else {
+ lea(rscratch1, entry);
+ Assembler::call(rscratch1);
+ }
+}
+
+void MacroAssembler::ic_call(address entry) {
+ RelocationHolder rh = virtual_call_Relocation::spec(pc());
+ movptr(rax, (intptr_t)Universe::non_oop_word());
+ call(AddressLiteral(entry, rh));
+}
+
+// Implementation of call_VM versions
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ bool check_exceptions) {
+ Label C, E;
+ call(C, relocInfo::none);
+ jmp(E);
+
+ bind(C);
+ call_VM_helper(oop_result, entry_point, 0, check_exceptions);
+ ret(0);
+
+ bind(E);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions) {
+ Label C, E;
+ call(C, relocInfo::none);
+ jmp(E);
+
+ bind(C);
+ pass_arg1(this, arg_1);
+ call_VM_helper(oop_result, entry_point, 1, check_exceptions);
+ ret(0);
+
+ bind(E);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ bool check_exceptions) {
+ Label C, E;
+ call(C, relocInfo::none);
+ jmp(E);
+
+ bind(C);
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
+ call_VM_helper(oop_result, entry_point, 2, check_exceptions);
+ ret(0);
+
+ bind(E);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ Register arg_3,
+ bool check_exceptions) {
+ Label C, E;
+ call(C, relocInfo::none);
+ jmp(E);
+
+ bind(C);
+
+ LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+ pass_arg3(this, arg_3);
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+
+ pass_arg1(this, arg_1);
+ call_VM_helper(oop_result, entry_point, 3, check_exceptions);
+ ret(0);
+
+ bind(E);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) {
+ Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
+ call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions) {
+ pass_arg1(this, arg_1);
+ call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ bool check_exceptions) {
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
+ call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
+}
+
+void MacroAssembler::call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ Register arg_3,
+ bool check_exceptions) {
+ LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+ pass_arg3(this, arg_3);
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
+ call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) {
+ Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
+ MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions) {
+ pass_arg1(this, arg_1);
+ super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ bool check_exceptions) {
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
+ super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
+}
+
+void MacroAssembler::super_call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ Register arg_2,
+ Register arg_3,
+ bool check_exceptions) {
+ LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+ pass_arg3(this, arg_3);
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
+ super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
+}
+
+void MacroAssembler::call_VM_base(Register oop_result,
+ Register java_thread,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) {
+ // determine java_thread register
+ if (!java_thread->is_valid()) {
+#ifdef _LP64
+ java_thread = r15_thread;
+#else
+ java_thread = rdi;
+ get_thread(java_thread);
+#endif // LP64
+ }
+ // determine last_java_sp register
+ if (!last_java_sp->is_valid()) {
+ last_java_sp = rsp;
+ }
+ // debugging support
+ assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
+ LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
+#ifdef ASSERT
+ // TraceBytecodes does not use r12 but saves it over the call, so don't verify
+ // r12 is the heapbase.
+ LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+#endif // ASSERT
+
+ assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
+ assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
+
+ // push java thread (becomes first argument of C function)
+
+ NOT_LP64(push(java_thread); number_of_arguments++);
+ LP64_ONLY(mov(c_rarg0, r15_thread));
+
+ // set last Java frame before call
+ assert(last_java_sp != rbp, "can't use ebp/rbp");
+
+ // Only interpreter should have to set fp
+ set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
+
+ // do the call, remove parameters
+ MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
+
+ // restore the thread (cannot use the pushed argument since arguments
+ // may be overwritten by C code generated by an optimizing compiler);
+  // however we can use the register value directly if it is callee saved.
+ if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
+ // rdi & rsi (also r15) are callee saved -> nothing to do
+#ifdef ASSERT
+ guarantee(java_thread != rax, "change this code");
+ push(rax);
+ { Label L;
+ get_thread(rax);
+ cmpptr(java_thread, rax);
+ jcc(Assembler::equal, L);
+ STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
+ bind(L);
+ }
+ pop(rax);
+#endif
+ } else {
+ get_thread(java_thread);
+ }
+ // reset last Java frame
+ // Only interpreter should have to clear fp
+ reset_last_Java_frame(java_thread, true, false);
+
+#ifndef CC_INTERP
+ // C++ interp handles this in the interpreter
+ check_and_handle_popframe(java_thread);
+ check_and_handle_earlyret(java_thread);
+#endif /* CC_INTERP */
+
+ if (check_exceptions) {
+ // check for pending exceptions (java_thread is set upon return)
+ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
+#ifndef _LP64
+ jump_cc(Assembler::notEqual,
+ RuntimeAddress(StubRoutines::forward_exception_entry()));
+#else
+    // This used to conditionally jump to forward_exception, but if the code
+    // is relocated that branch might not reach. So we jump around an
+    // unconditional jump that can always reach the target.
+
+ Label ok;
+ jcc(Assembler::equal, ok);
+ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ bind(ok);
+#endif // LP64
+ }
+
+ // get oop result if there is one and reset the value in the thread
+ if (oop_result->is_valid()) {
+ get_vm_result(oop_result, java_thread);
+ }
+}
+
+void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
+
+ // Calculate the value for last_Java_sp
+ // somewhat subtle. call_VM does an intermediate call
+ // which places a return address on the stack just under the
+  // stack pointer as the user finished with it. This allows
+  // us to retrieve last_Java_pc from last_Java_sp[-1].
+  // On 32bit we then have to push additional args on the stack to accomplish
+  // the actual requested call. On 64bit call_VM can only use register args
+ // so the only extra space is the return address that call_VM created.
+ // This hopefully explains the calculations here.
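+  //
+  // For example, on 32bit with two arguments the stack at this point looks
+  // roughly like (low addresses first):
+  //
+  //   rsp + 0*wordSize  arg_1           (pushed by pass_arg1)
+  //   rsp + 1*wordSize  arg_2           (pushed by pass_arg2)
+  //   rsp + 2*wordSize  return address  (pushed by the intermediate call)
+  //   rsp + 3*wordSize  <- last_Java_sp, so last_Java_sp[-1] is the pc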
+
+#ifdef _LP64
+ // We've pushed one address, correct last_Java_sp
+ lea(rax, Address(rsp, wordSize));
+#else
+ lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
+#endif // LP64
+
+ call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
+
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
+ call_VM_leaf_base(entry_point, number_of_arguments);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
+ pass_arg0(this, arg_0);
+ call_VM_leaf(entry_point, 1);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
+
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
+ call_VM_leaf(entry_point, 2);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
+ LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
+ call_VM_leaf(entry_point, 3);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
+ pass_arg0(this, arg_0);
+ MacroAssembler::call_VM_leaf_base(entry_point, 1);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
+
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
+ MacroAssembler::call_VM_leaf_base(entry_point, 2);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
+ LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
+ MacroAssembler::call_VM_leaf_base(entry_point, 3);
+}
+
+void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
+ LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+ pass_arg3(this, arg_3);
+ LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
+ MacroAssembler::call_VM_leaf_base(entry_point, 4);
+}
+
+void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
+ movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
+ movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
+ verify_oop(oop_result, "broken oop in call_VM_base");
+}
+
+void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
+ movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
+ movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+}
+
+void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
+}
+
+void MacroAssembler::check_and_handle_popframe(Register java_thread) {
+}
+
+void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
+ if (reachable(src1)) {
+ cmpl(as_Address(src1), imm);
+ } else {
+ lea(rscratch1, src1);
+ cmpl(Address(rscratch1, 0), imm);
+ }
+}
+
+void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
+ assert(!src2.is_lval(), "use cmpptr");
+ if (reachable(src2)) {
+ cmpl(src1, as_Address(src2));
+ } else {
+ lea(rscratch1, src2);
+ cmpl(src1, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::cmp32(Register src1, int32_t imm) {
+ Assembler::cmpl(src1, imm);
+}
+
+void MacroAssembler::cmp32(Register src1, Address src2) {
+ Assembler::cmpl(src1, src2);
+}
+
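+// Materializes the result of a double compare as an int in dst:
+// -1, 0 or +1 for opr1 <, ==, > opr2, with an unordered result mapped
+// to -1 or +1 according to unordered_is_less.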
+void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
+ ucomisd(opr1, opr2);
+
+ Label L;
+ if (unordered_is_less) {
+ movl(dst, -1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::below , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ increment(dst);
+ } else { // unordered is greater
+ movl(dst, 1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::above , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ decrementl(dst);
+ }
+ bind(L);
+}
+
+void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
+ ucomiss(opr1, opr2);
+
+ Label L;
+ if (unordered_is_less) {
+ movl(dst, -1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::below , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ increment(dst);
+ } else { // unordered is greater
+ movl(dst, 1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::above , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ decrementl(dst);
+ }
+ bind(L);
+}
+
+
+void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
+ if (reachable(src1)) {
+ cmpb(as_Address(src1), imm);
+ } else {
+ lea(rscratch1, src1);
+ cmpb(Address(rscratch1, 0), imm);
+ }
+}
+
+void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
+#ifdef _LP64
+ if (src2.is_lval()) {
+ movptr(rscratch1, src2);
+ Assembler::cmpq(src1, rscratch1);
+ } else if (reachable(src2)) {
+ cmpq(src1, as_Address(src2));
+ } else {
+ lea(rscratch1, src2);
+ Assembler::cmpq(src1, Address(rscratch1, 0));
+ }
+#else
+ if (src2.is_lval()) {
+ cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
+ } else {
+ cmpl(src1, as_Address(src2));
+ }
+#endif // _LP64
+}
+
+void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
+ assert(src2.is_lval(), "not a mem-mem compare");
+#ifdef _LP64
+ // moves src2's literal address
+ movptr(rscratch1, src2);
+ Assembler::cmpq(src1, rscratch1);
+#else
+ cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
+#endif // _LP64
+}
+
+void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
+ if (reachable(adr)) {
+ if (os::is_MP())
+ lock();
+ cmpxchgptr(reg, as_Address(adr));
+ } else {
+ lea(rscratch1, adr);
+ if (os::is_MP())
+ lock();
+ cmpxchgptr(reg, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
+ LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
+}
+
+void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::comisd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::comisd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::comiss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::comiss(dst, Address(rscratch1, 0));
+ }
+}
+
+
+void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
+ Condition negated_cond = negate_condition(cond);
+ Label L;
+ jcc(negated_cond, L);
+ atomic_incl(counter_addr);
+ bind(L);
+}
+
+int MacroAssembler::corrected_idivl(Register reg) {
+ // Full implementation of Java idiv and irem; checks for
+ // special case as described in JVM spec., p.243 & p.271.
+ // The function returns the (pc) offset of the idivl
+ // instruction - may be needed for implicit exceptions.
+ //
+ // normal case special case
+ //
+ // input : rax,: dividend min_int
+ // reg: divisor (may not be rax,/rdx) -1
+ //
+ // output: rax,: quotient (= rax, idiv reg) min_int
+ // rdx: remainder (= rax, irem reg) 0
+ assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
+ const int min_int = 0x80000000;
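+  // E.g. min_int / -1 overflows (the mathematical result 2^31 is not
+  // representable), so idivl would trap; the special case below returns
+  // min_int with remainder 0 as the JVM spec requires, without executing idivl.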
+ Label normal_case, special_case;
+
+ // check for special case
+ cmpl(rax, min_int);
+ jcc(Assembler::notEqual, normal_case);
+ xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
+ cmpl(reg, -1);
+ jcc(Assembler::equal, special_case);
+
+ // handle normal case
+ bind(normal_case);
+ cdql();
+ int idivl_offset = offset();
+ idivl(reg);
+
+ // normal and special case exit
+ bind(special_case);
+
+ return idivl_offset;
+}
+
+
+
+void MacroAssembler::decrementl(Register reg, int value) {
+ if (value == min_jint) {subl(reg, value) ; return; }
+ if (value < 0) { incrementl(reg, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { decl(reg) ; return; }
+ /* else */ { subl(reg, value) ; return; }
+}
+
+void MacroAssembler::decrementl(Address dst, int value) {
+ if (value == min_jint) {subl(dst, value) ; return; }
+ if (value < 0) { incrementl(dst, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { decl(dst) ; return; }
+ /* else */ { subl(dst, value) ; return; }
+}
+
+void MacroAssembler::division_with_shift (Register reg, int shift_value) {
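+  // Signed division by 2^shift_value with rounding toward zero: negative
+  // dividends get (2^shift_value - 1) added before the arithmetic shift.
+  // E.g. shift_value == 2: -5 + 3 = -2 and -2 >> 2 == -1, matching -5 / 4.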
+ assert (shift_value > 0, "illegal shift value");
+ Label _is_positive;
+ testl (reg, reg);
+ jcc (Assembler::positive, _is_positive);
+ int offset = (1 << shift_value) - 1 ;
+
+ if (offset == 1) {
+ incrementl(reg);
+ } else {
+ addl(reg, offset);
+ }
+
+ bind (_is_positive);
+ sarl(reg, shift_value);
+}
+
+void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::divsd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::divsd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::divss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::divss(dst, Address(rscratch1, 0));
+ }
+}
+
+// !defined(COMPILER2) is because of stupid core builds
+#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
+void MacroAssembler::empty_FPU_stack() {
+ if (VM_Version::supports_mmx()) {
+ emms();
+ } else {
+ for (int i = 8; i-- > 0; ) ffree(i);
+ }
+}
+#endif // !LP64 || C1 || !C2
+
+
+// Defines obj, preserves var_size_in_bytes
+void MacroAssembler::eden_allocate(Register obj,
+ Register var_size_in_bytes,
+ int con_size_in_bytes,
+ Register t1,
+ Label& slow_case) {
+ assert(obj == rax, "obj must be in rax, for cmpxchg");
+ assert_different_registers(obj, var_size_in_bytes, t1);
+ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ jmp(slow_case);
+ } else {
+ Register end = t1;
+ Label retry;
+ bind(retry);
+ ExternalAddress heap_top((address) Universe::heap()->top_addr());
+ movptr(obj, heap_top);
+ if (var_size_in_bytes == noreg) {
+ lea(end, Address(obj, con_size_in_bytes));
+ } else {
+ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
+ }
+ // if end < obj then we wrapped around => object too long => slow case
+ cmpptr(end, obj);
+ jcc(Assembler::below, slow_case);
+ cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
+ jcc(Assembler::above, slow_case);
+ // Compare obj with the top addr, and if still equal, store the new top addr in
+ // end at the address of the top addr pointer. Sets ZF if was equal, and clears
+ // it otherwise. Use lock prefix for atomicity on MPs.
+ locked_cmpxchgptr(end, heap_top);
+ jcc(Assembler::notEqual, retry);
+ }
+}
+
+void MacroAssembler::enter() {
+ push(rbp);
+ mov(rbp, rsp);
+}
+
+// A 5 byte nop that is safe for patching (see patch_verified_entry)
+void MacroAssembler::fat_nop() {
+ if (UseAddressNop) {
+ addr_nop_5();
+ } else {
+ emit_byte(0x26); // es:
+ emit_byte(0x2e); // cs:
+ emit_byte(0x64); // fs:
+ emit_byte(0x65); // gs:
+ emit_byte(0x90);
+ }
+}
+
+void MacroAssembler::fcmp(Register tmp) {
+ fcmp(tmp, 1, true, true);
+}
+
+void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
+ assert(!pop_right || pop_left, "usage error");
+ if (VM_Version::supports_cmov()) {
+ assert(tmp == noreg, "unneeded temp");
+ if (pop_left) {
+ fucomip(index);
+ } else {
+ fucomi(index);
+ }
+ if (pop_right) {
+ fpop();
+ }
+ } else {
+ assert(tmp != noreg, "need temp");
+ if (pop_left) {
+ if (pop_right) {
+ fcompp();
+ } else {
+ fcomp(index);
+ }
+ } else {
+ fcom(index);
+ }
+ // convert FPU condition into eflags condition via rax,
+ save_rax(tmp);
+ fwait(); fnstsw_ax();
+ sahf();
+ restore_rax(tmp);
+ }
+ // condition codes set as follows:
+ //
+ // CF (corresponds to C0) if x < y
+ // PF (corresponds to C2) if unordered
+ // ZF (corresponds to C3) if x = y
+}
+
+void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
+ fcmp2int(dst, unordered_is_less, 1, true, true);
+}
+
+void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
+ fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
+ Label L;
+ if (unordered_is_less) {
+ movl(dst, -1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::below , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ increment(dst);
+ } else { // unordered is greater
+ movl(dst, 1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::above , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ decrementl(dst);
+ }
+ bind(L);
+}
+
+void MacroAssembler::fld_d(AddressLiteral src) {
+ fld_d(as_Address(src));
+}
+
+void MacroAssembler::fld_s(AddressLiteral src) {
+ fld_s(as_Address(src));
+}
+
+void MacroAssembler::fld_x(AddressLiteral src) {
+ Assembler::fld_x(as_Address(src));
+}
+
+void MacroAssembler::fldcw(AddressLiteral src) {
+ Assembler::fldcw(as_Address(src));
+}
+
+void MacroAssembler::pow_exp_core_encoding() {
+ // kills rax, rcx, rdx
+ subptr(rsp,sizeof(jdouble));
+ // computes 2^X. Stack: X ...
+ // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
+ // keep it on the thread's stack to compute 2^int(X) later
+  // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1
+ // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
+ fld_s(0); // Stack: X X ...
+ frndint(); // Stack: int(X) X ...
+ fsuba(1); // Stack: int(X) X-int(X) ...
+ fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
+ f2xm1(); // Stack: 2^(X-int(X))-1 ...
+ fld1(); // Stack: 1 2^(X-int(X))-1 ...
+ faddp(1); // Stack: 2^(X-int(X))
+ // computes 2^(int(X)): add exponent bias (1023) to int(X), then
+ // shift int(X)+1023 to exponent position.
+ // Exponent is limited to 11 bits if int(X)+1023 does not fit in 11
+ // bits, set result to NaN. 0x000 and 0x7FF are reserved exponent
+ // values so detect them and set result to NaN.
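+  // E.g. int(X) == 3: 3 + 1023 = 1026 = 0x402; shifted into the exponent
+  // position this gives the high word 0x40200000, i.e. the double 2^3 = 8.0.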
+ movl(rax,Address(rsp,0));
+ movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
+ addl(rax, 1023);
+ movl(rdx,rax);
+ shll(rax,20);
+ // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
+ addl(rdx,1);
+ // Check that 1 < int(X)+1023+1 < 2048
+ // in 3 steps:
+ // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
+ // 2- (int(X)+1023+1)&-2048 != 0
+ // 3- (int(X)+1023+1)&-2048 != 1
+ // Do 2- first because addl just updated the flags.
+ cmov32(Assembler::equal,rax,rcx);
+ cmpl(rdx,1);
+ cmov32(Assembler::equal,rax,rcx);
+ testl(rdx,rcx);
+ cmov32(Assembler::notEqual,rax,rcx);
+ movl(Address(rsp,4),rax);
+ movl(Address(rsp,0),0);
+ fmul_d(Address(rsp,0)); // Stack: 2^X ...
+ addptr(rsp,sizeof(jdouble));
+}
+
+void MacroAssembler::increase_precision() {
+ subptr(rsp, BytesPerWord);
+ fnstcw(Address(rsp, 0));
+ movl(rax, Address(rsp, 0));
+ orl(rax, 0x300);
+ push(rax);
+ fldcw(Address(rsp, 0));
+ pop(rax);
+}
+
+void MacroAssembler::restore_precision() {
+ fldcw(Address(rsp, 0));
+ addptr(rsp, BytesPerWord);
+}
+
+void MacroAssembler::fast_pow() {
+ // computes X^Y = 2^(Y * log2(X))
+ // if fast computation is not possible, result is NaN. Requires
+ // fallback from user of this macro.
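+  // E.g. X = 8, Y = 2: 2^(2 * log2(8)) = 2^6 = 64.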
+ // increase precision for intermediate steps of the computation
+ increase_precision();
+ fyl2x(); // Stack: (Y*log2(X)) ...
+ pow_exp_core_encoding(); // Stack: exp(X) ...
+ restore_precision();
+}
+
+void MacroAssembler::fast_exp() {
+ // computes exp(X) = 2^(X * log2(e))
+ // if fast computation is not possible, result is NaN. Requires
+ // fallback from user of this macro.
+ // increase precision for intermediate steps of the computation
+ increase_precision();
+ fldl2e(); // Stack: log2(e) X ...
+ fmulp(1); // Stack: (X*log2(e)) ...
+ pow_exp_core_encoding(); // Stack: exp(X) ...
+ restore_precision();
+}
+
+void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
+ // kills rax, rcx, rdx
+ // pow and exp needs 2 extra registers on the fpu stack.
+ Label slow_case, done;
+ Register tmp = noreg;
+ if (!VM_Version::supports_cmov()) {
+ // fcmp needs a temporary so preserve rdx,
+ tmp = rdx;
+ }
+ Register tmp2 = rax;
+ Register tmp3 = rcx;
+
+ if (is_exp) {
+ // Stack: X
+ fld_s(0); // duplicate argument for runtime call. Stack: X X
+ fast_exp(); // Stack: exp(X) X
+ fcmp(tmp, 0, false, false); // Stack: exp(X) X
+ // exp(X) not equal to itself: exp(X) is NaN go to slow case.
+ jcc(Assembler::parity, slow_case);
+ // get rid of duplicate argument. Stack: exp(X)
+ if (num_fpu_regs_in_use > 0) {
+ fxch();
+ fpop();
+ } else {
+ ffree(1);
+ }
+ jmp(done);
+ } else {
+ // Stack: X Y
+ Label x_negative, y_odd;
+
+ fldz(); // Stack: 0 X Y
+ fcmp(tmp, 1, true, false); // Stack: X Y
+ jcc(Assembler::above, x_negative);
+
+ // X >= 0
+
+ fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
+ fld_s(1); // Stack: X Y X Y
+ fast_pow(); // Stack: X^Y X Y
+ fcmp(tmp, 0, false, false); // Stack: X^Y X Y
+ // X^Y not equal to itself: X^Y is NaN go to slow case.
+ jcc(Assembler::parity, slow_case);
+ // get rid of duplicate arguments. Stack: X^Y
+ if (num_fpu_regs_in_use > 0) {
+ fxch(); fpop();
+ fxch(); fpop();
+ } else {
+ ffree(2);
+ ffree(1);
+ }
+ jmp(done);
+
+ // X <= 0
+ bind(x_negative);
+
+ fld_s(1); // Stack: Y X Y
+ frndint(); // Stack: int(Y) X Y
+ fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
+ jcc(Assembler::notEqual, slow_case);
+
+ subptr(rsp, 8);
+
+ // For X^Y, when X < 0, Y has to be an integer and the final
+ // result depends on whether it's odd or even. We just checked
+ // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
+ // integer to test its parity. If int(Y) is huge and doesn't fit
+ // in the 64 bit integer range, the integer indefinite value will
+ // end up in the gp registers. Huge numbers are all even, the
+ // integer indefinite number is even so it's fine.
+
+#ifdef ASSERT
+ // Let's check we don't end up with an integer indefinite number
+ // when not expected. First test for huge numbers: check whether
+ // int(Y)+1 == int(Y) which is true for very large numbers and
+ // those are all even. A 64 bit integer is guaranteed to not
+ // overflow for numbers where y+1 != y (when precision is set to
+ // double precision).
+ Label y_not_huge;
+
+ fld1(); // Stack: 1 int(Y) X Y
+ fadd(1); // Stack: 1+int(Y) int(Y) X Y
+
+#ifdef _LP64
+ // trip to memory to force the precision down from double extended
+ // precision
+ fstp_d(Address(rsp, 0));
+ fld_d(Address(rsp, 0));
+#endif
+
+ fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
+#endif
+
+ // move int(Y) as 64 bit integer to thread's stack
+ fistp_d(Address(rsp,0)); // Stack: X Y
+
+#ifdef ASSERT
+ jcc(Assembler::notEqual, y_not_huge);
+
+ // Y is huge so we know it's even. It may not fit in a 64 bit
+ // integer and we don't want the debug code below to see the
+ // integer indefinite value so overwrite int(Y) on the thread's
+ // stack with 0.
+ movl(Address(rsp, 0), 0);
+ movl(Address(rsp, 4), 0);
+
+ bind(y_not_huge);
+#endif
+
+ fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
+ fld_s(1); // Stack: X Y X Y
+ fabs(); // Stack: abs(X) Y X Y
+ fast_pow(); // Stack: abs(X)^Y X Y
+ fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
+ // abs(X)^Y not equal to itself: abs(X)^Y is NaN go to slow case.
+
+ pop(tmp2);
+ NOT_LP64(pop(tmp3));
+ jcc(Assembler::parity, slow_case);
+
+#ifdef ASSERT
+ // Check that int(Y) is not integer indefinite value (int
+ // overflow). Shouldn't happen because for values that would
+ // overflow, 1+int(Y)==Y which was tested earlier.
+#ifndef _LP64
+ {
+ Label integer;
+ testl(tmp2, tmp2);
+ jcc(Assembler::notZero, integer);
+ cmpl(tmp3, 0x80000000);
+ jcc(Assembler::notZero, integer);
+ STOP("integer indefinite value shouldn't be seen here");
+ bind(integer);
+ }
+#else
+ {
+ Label integer;
+ mov(tmp3, tmp2); // preserve tmp2 for parity check below
+ shlq(tmp3, 1);
+ jcc(Assembler::carryClear, integer);
+ jcc(Assembler::notZero, integer);
+ STOP("integer indefinite value shouldn't be seen here");
+ bind(integer);
+ }
+#endif
+#endif
+
+ // get rid of duplicate arguments. Stack: X^Y
+ if (num_fpu_regs_in_use > 0) {
+ fxch(); fpop();
+ fxch(); fpop();
+ } else {
+ ffree(2);
+ ffree(1);
+ }
+
+ testl(tmp2, 1);
+ jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
+    // X <= 0, Y odd: X^Y = -abs(X)^Y
+
+ fchs(); // Stack: -abs(X)^Y Y
+ jmp(done);
+ }
+
+ // slow case: runtime call
+ bind(slow_case);
+
+ fpop(); // pop incorrect result or int(Y)
+
+ fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
+ is_exp ? 1 : 2, num_fpu_regs_in_use);
+
+ // Come here with result in F-TOS
+ bind(done);
+}
+
+void MacroAssembler::fpop() {
+ ffree();
+ fincstp();
+}
+
+void MacroAssembler::fremr(Register tmp) {
+ save_rax(tmp);
+ { Label L;
+ bind(L);
+ fprem();
+ fwait(); fnstsw_ax();
+#ifdef _LP64
+ testl(rax, 0x400);
+ jcc(Assembler::notEqual, L);
+#else
+ sahf();
+ jcc(Assembler::parity, L);
+#endif // _LP64
+ }
+ restore_rax(tmp);
+ // Result is in ST0.
+ // Note: fxch & fpop to get rid of ST1
+ // (otherwise FPU stack could overflow eventually)
+ fxch(1);
+ fpop();
+}
+
+
+void MacroAssembler::incrementl(AddressLiteral dst) {
+ if (reachable(dst)) {
+ incrementl(as_Address(dst));
+ } else {
+ lea(rscratch1, dst);
+ incrementl(Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::incrementl(ArrayAddress dst) {
+ incrementl(as_Address(dst));
+}
+
+void MacroAssembler::incrementl(Register reg, int value) {
+ if (value == min_jint) {addl(reg, value) ; return; }
+ if (value < 0) { decrementl(reg, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { incl(reg) ; return; }
+ /* else */ { addl(reg, value) ; return; }
+}
+
+void MacroAssembler::incrementl(Address dst, int value) {
+ if (value == min_jint) {addl(dst, value) ; return; }
+ if (value < 0) { decrementl(dst, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { incl(dst) ; return; }
+ /* else */ { addl(dst, value) ; return; }
+}
+
+void MacroAssembler::jump(AddressLiteral dst) {
+ if (reachable(dst)) {
+ jmp_literal(dst.target(), dst.rspec());
+ } else {
+ lea(rscratch1, dst);
+ jmp(rscratch1);
+ }
+}
+
+void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
+ if (reachable(dst)) {
+ InstructionMark im(this);
+ relocate(dst.reloc());
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = (intptr_t)dst.target() - ((intptr_t)pc());
+ if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ emit_byte(0x70 | cc);
+ emit_byte((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit_byte(0x0F);
+ emit_byte(0x80 | cc);
+ emit_long(offs - long_size);
+ }
+ } else {
+#ifdef ASSERT
+ warning("reversing conditional branch");
+#endif /* ASSERT */
+ Label skip;
+ jccb(reverse[cc], skip);
+ lea(rscratch1, dst);
+ Assembler::jmp(rscratch1);
+ bind(skip);
+ }
+}
+
+void MacroAssembler::ldmxcsr(AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::ldmxcsr(as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::ldmxcsr(Address(rscratch1, 0));
+ }
+}
+
+int MacroAssembler::load_signed_byte(Register dst, Address src) {
+ int off;
+ if (LP64_ONLY(true ||) VM_Version::is_P6()) {
+ off = offset();
+ movsbl(dst, src); // movsxb
+ } else {
+ off = load_unsigned_byte(dst, src);
+ shll(dst, 24);
+ sarl(dst, 24);
+ }
+ return off;
+}
+
+// Note: load_signed_short used to be called load_signed_word.
+// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
+// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
+// The term "word" in HotSpot means a 32- or 64-bit machine word.
+int MacroAssembler::load_signed_short(Register dst, Address src) {
+ int off;
+ if (LP64_ONLY(true ||) VM_Version::is_P6()) {
+ // This is dubious to me since it seems safe to do a signed 16 => 64 bit
+ // version but this is what 64bit has always done. This seems to imply
+ // that users are only using 32bits worth.
+ off = offset();
+ movswl(dst, src); // movsxw
+ } else {
+ off = load_unsigned_short(dst, src);
+ shll(dst, 16);
+ sarl(dst, 16);
+ }
+ return off;
+}
+
+int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
+ // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
+ // and "3.9 Partial Register Penalties", p. 22).
+ int off;
+ if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
+ off = offset();
+ movzbl(dst, src); // movzxb
+ } else {
+ xorl(dst, dst);
+ off = offset();
+ movb(dst, src);
+ }
+ return off;
+}
+
+// Note: load_unsigned_short used to be called load_unsigned_word.
+int MacroAssembler::load_unsigned_short(Register dst, Address src) {
+ // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
+ // and "3.9 Partial Register Penalties", p. 22).
+ int off;
+ if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
+ off = offset();
+ movzwl(dst, src); // movzxw
+ } else {
+ xorl(dst, dst);
+ off = offset();
+ movw(dst, src);
+ }
+ return off;
+}
+
+void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
+ switch (size_in_bytes) {
+#ifndef _LP64
+ case 8:
+ assert(dst2 != noreg, "second dest register required");
+ movl(dst, src);
+ movl(dst2, src.plus_disp(BytesPerInt));
+ break;
+#else
+ case 8: movq(dst, src); break;
+#endif
+ case 4: movl(dst, src); break;
+ case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
+ case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
+ default: ShouldNotReachHere();
+ }
+}
+
+void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
+ switch (size_in_bytes) {
+#ifndef _LP64
+ case 8:
+ assert(src2 != noreg, "second source register required");
+ movl(dst, src);
+ movl(dst.plus_disp(BytesPerInt), src2);
+ break;
+#else
+ case 8: movq(dst, src); break;
+#endif
+ case 4: movl(dst, src); break;
+ case 2: movw(dst, src); break;
+ case 1: movb(dst, src); break;
+ default: ShouldNotReachHere();
+ }
+}
+
+void MacroAssembler::mov32(AddressLiteral dst, Register src) {
+ if (reachable(dst)) {
+ movl(as_Address(dst), src);
+ } else {
+ lea(rscratch1, dst);
+ movl(Address(rscratch1, 0), src);
+ }
+}
+
+void MacroAssembler::mov32(Register dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movl(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movl(dst, Address(rscratch1, 0));
+ }
+}
+
+// C++ bool manipulation
+
+void MacroAssembler::movbool(Register dst, Address src) {
+ if(sizeof(bool) == 1)
+ movb(dst, src);
+ else if(sizeof(bool) == 2)
+ movw(dst, src);
+ else if(sizeof(bool) == 4)
+ movl(dst, src);
+ else
+ // unsupported
+ ShouldNotReachHere();
+}
+
+void MacroAssembler::movbool(Address dst, bool boolconst) {
+ if(sizeof(bool) == 1)
+ movb(dst, (int) boolconst);
+ else if(sizeof(bool) == 2)
+ movw(dst, (int) boolconst);
+ else if(sizeof(bool) == 4)
+ movl(dst, (int) boolconst);
+ else
+ // unsupported
+ ShouldNotReachHere();
+}
+
+void MacroAssembler::movbool(Address dst, Register src) {
+ if(sizeof(bool) == 1)
+ movb(dst, src);
+ else if(sizeof(bool) == 2)
+ movw(dst, src);
+ else if(sizeof(bool) == 4)
+ movl(dst, src);
+ else
+ // unsupported
+ ShouldNotReachHere();
+}
+
+void MacroAssembler::movbyte(ArrayAddress dst, int src) {
+ movb(as_Address(dst), src);
+}
+
+void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movdl(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movdl(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movq(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movq(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ if (UseXmmLoadAndClearUpper) {
+ movsd (dst, as_Address(src));
+ } else {
+ movlpd(dst, as_Address(src));
+ }
+ } else {
+ lea(rscratch1, src);
+ if (UseXmmLoadAndClearUpper) {
+ movsd (dst, Address(rscratch1, 0));
+ } else {
+ movlpd(dst, Address(rscratch1, 0));
+ }
+ }
+}
+
+void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::movptr(Register dst, Register src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+void MacroAssembler::movptr(Register dst, Address src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+// src should NEVER be a real pointer. Use AddressLiteral for true pointers
+void MacroAssembler::movptr(Register dst, intptr_t src) {
+ LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+void MacroAssembler::movptr(Address dst, Register src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::movdqu(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::movdqu(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::movsd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::movsd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::movss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::movss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::mulsd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::mulsd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::mulss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::mulss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::null_check(Register reg, int offset) {
+ if (needs_explicit_null_check(offset)) {
+ // provoke OS NULL exception if reg = NULL by
+ // accessing M[reg] w/o changing any (non-CC) registers
+ // NOTE: cmpl is plenty here to provoke a segv
+ cmpptr(rax, Address(reg, 0));
+ // Note: should probably use testl(rax, Address(reg, 0));
+ // may be shorter code (however, this version of
+ // testl needs to be implemented first)
+ } else {
+ // nothing to do, (later) access of M[reg + offset]
+ // will provoke OS NULL exception if reg = NULL
+ }
+}
+
+void MacroAssembler::os_breakpoint() {
+  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
+ // (e.g., MSVC can't call ps() otherwise)
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
+}
+
+void MacroAssembler::pop_CPU_state() {
+ pop_FPU_state();
+ pop_IU_state();
+}
+
+void MacroAssembler::pop_FPU_state() {
+ NOT_LP64(frstor(Address(rsp, 0));)
+ LP64_ONLY(fxrstor(Address(rsp, 0));)
+ addptr(rsp, FPUStateSizeInWords * wordSize);
+}
+
+void MacroAssembler::pop_IU_state() {
+ popa();
+ LP64_ONLY(addq(rsp, 8));
+ popf();
+}
+
+// Save Integer and Float state
+// Warning: Stack must be 16 byte aligned (64bit)
+void MacroAssembler::push_CPU_state() {
+ push_IU_state();
+ push_FPU_state();
+}
+
+void MacroAssembler::push_FPU_state() {
+ subptr(rsp, FPUStateSizeInWords * wordSize);
+#ifndef _LP64
+ fnsave(Address(rsp, 0));
+ fwait();
+#else
+ fxsave(Address(rsp, 0));
+#endif // LP64
+}
+
+void MacroAssembler::push_IU_state() {
+ // Push flags first because pusha kills them
+ pushf();
+ // Make sure rsp stays 16-byte aligned
+ LP64_ONLY(subq(rsp, 8));
+ pusha();
+}
+
+void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
+ // determine java_thread register
+ if (!java_thread->is_valid()) {
+ java_thread = rdi;
+ get_thread(java_thread);
+ }
+ // we must set sp to zero to clear frame
+ movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
+ if (clear_fp) {
+ movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
+ }
+
+ if (clear_pc)
+ movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
+
+}
+
+void MacroAssembler::restore_rax(Register tmp) {
+ if (tmp == noreg) pop(rax);
+ else if (tmp != rax) mov(rax, tmp);
+}
+
+void MacroAssembler::round_to(Register reg, int modulus) {
+ addptr(reg, modulus - 1);
+ andptr(reg, -modulus);
+}
+
+void MacroAssembler::save_rax(Register tmp) {
+ if (tmp == noreg) push(rax);
+ else if (tmp != rax) mov(tmp, rax);
+}
+
+// Write serialization page so VM thread can do a pseudo remote membar.
+// We use the current thread pointer to calculate a thread specific
+// offset to write to within the page. This minimizes bus traffic
+// due to cache line collision.
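+// E.g. assuming a 4096-byte page, the masked value below is a 4-byte
+// aligned offset in [0, 4092], so different threads tend to hit
+// different words of the page.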
+void MacroAssembler::serialize_memory(Register thread, Register tmp) {
+ movl(tmp, thread);
+ shrl(tmp, os::get_serialize_page_shift_count());
+ andl(tmp, (os::vm_page_size() - sizeof(int)));
+
+ Address index(noreg, tmp, Address::times_1);
+ ExternalAddress page(os::get_memory_serialize_page());
+
+ // Size of store must match masking code above
+ movl(as_Address(ArrayAddress(page, index)), tmp);
+}
+
+// Calls to C land
+//
+// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
+// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
+// has to be reset to 0. This is required to allow proper stack traversal.
+void MacroAssembler::set_last_Java_frame(Register java_thread,
+ Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc) {
+ // determine java_thread register
+ if (!java_thread->is_valid()) {
+ java_thread = rdi;
+ get_thread(java_thread);
+ }
+ // determine last_java_sp register
+ if (!last_java_sp->is_valid()) {
+ last_java_sp = rsp;
+ }
+
+ // last_java_fp is optional
+
+ if (last_java_fp->is_valid()) {
+ movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
+ }
+
+ // last_java_pc is optional
+
+ if (last_java_pc != NULL) {
+ lea(Address(java_thread,
+ JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
+ InternalAddress(last_java_pc));
+
+ }
+ movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
+}
+
+void MacroAssembler::shlptr(Register dst, int imm8) {
+ LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
+}
+
+void MacroAssembler::shrptr(Register dst, int imm8) {
+ LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
+}
+
+void MacroAssembler::sign_extend_byte(Register reg) {
+ if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
+ movsbl(reg, reg); // movsxb
+ } else {
+ shll(reg, 24);
+ sarl(reg, 24);
+ }
+}
+
+void MacroAssembler::sign_extend_short(Register reg) {
+ if (LP64_ONLY(true ||) VM_Version::is_P6()) {
+ movswl(reg, reg); // movsxw
+ } else {
+ shll(reg, 16);
+ sarl(reg, 16);
+ }
+}
+
+void MacroAssembler::testl(Register dst, AddressLiteral src) {
+ assert(reachable(src), "Address should be reachable");
+ testl(dst, as_Address(src));
+}
+
+void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::sqrtsd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::sqrtsd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::sqrtss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::sqrtss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::subsd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::subsd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::subss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::subss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::ucomisd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::ucomisd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::ucomiss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::ucomiss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
+ // Used in sign-bit flipping with aligned address.
+ assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+ if (reachable(src)) {
+ Assembler::xorpd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::xorpd(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
+ // Used in sign-bit flipping with aligned address.
+ assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+ if (reachable(src)) {
+ Assembler::xorps(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::xorps(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
+  // Used with aligned addresses (e.g. byte shuffle masks).
+ assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
+ if (reachable(src)) {
+ Assembler::pshufb(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::pshufb(dst, Address(rscratch1, 0));
+ }
+}
+
+// AVX 3-operand instructions
+
+void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vaddsd(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vaddsd(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vaddss(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vaddss(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
+ if (reachable(src)) {
+ vandpd(dst, nds, as_Address(src), vector256);
+ } else {
+ lea(rscratch1, src);
+ vandpd(dst, nds, Address(rscratch1, 0), vector256);
+ }
+}
+
+void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
+ if (reachable(src)) {
+ vandps(dst, nds, as_Address(src), vector256);
+ } else {
+ lea(rscratch1, src);
+ vandps(dst, nds, Address(rscratch1, 0), vector256);
+ }
+}
+
+void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vdivsd(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vdivsd(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vdivss(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vdivss(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vmulsd(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vmulsd(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vmulss(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vmulss(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vsubsd(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vsubsd(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
+ if (reachable(src)) {
+ vsubss(dst, nds, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ vsubss(dst, nds, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
+ if (reachable(src)) {
+ vxorpd(dst, nds, as_Address(src), vector256);
+ } else {
+ lea(rscratch1, src);
+ vxorpd(dst, nds, Address(rscratch1, 0), vector256);
+ }
+}
+
+void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
+ if (reachable(src)) {
+ vxorps(dst, nds, as_Address(src), vector256);
+ } else {
+ lea(rscratch1, src);
+ vxorps(dst, nds, Address(rscratch1, 0), vector256);
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////
+#ifndef SERIALGC
+
+void MacroAssembler::g1_write_barrier_pre(Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call) {
+
+ // If expand_call is true then we expand the call_VM_leaf macro
+ // directly to skip generating the check by
+ // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
+
+#ifdef _LP64
+ assert(thread == r15_thread, "must be");
+#endif // _LP64
+
+ Label done;
+ Label runtime;
+
+ assert(pre_val != noreg, "check this code");
+
+ if (obj != noreg) {
+ assert_different_registers(obj, pre_val, tmp);
+ assert(pre_val != rax, "check this code");
+ }
+
+ Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_active()));
+ Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_index()));
+ Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+ PtrQueue::byte_offset_of_buf()));
+
+
+ // Is marking active?
+ if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
+ cmpl(in_progress, 0);
+ } else {
+ assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
+ cmpb(in_progress, 0);
+ }
+ jcc(Assembler::equal, done);
+
+ // Do we need to load the previous value?
+ if (obj != noreg) {
+ load_heap_oop(pre_val, Address(obj, 0));
+ }
+
+ // Is the previous value null?
+ cmpptr(pre_val, (int32_t) NULL_WORD);
+ jcc(Assembler::equal, done);
+
+ // Can we store original value in the thread's buffer?
+ // Is index == 0?
+ // (The index field is typed as size_t.)
+
+ movptr(tmp, index); // tmp := *index_adr
+ cmpptr(tmp, 0); // tmp == 0?
+ jcc(Assembler::equal, runtime); // If yes, goto runtime
+
+ subptr(tmp, wordSize); // tmp := tmp - wordSize
+ movptr(index, tmp); // *index_adr := tmp
+ addptr(tmp, buffer); // tmp := tmp + *buffer_adr
+
+ // Record the previous value
+ movptr(Address(tmp, 0), pre_val);
+ jmp(done);
+
+ bind(runtime);
+ // save the live input values
+ if(tosca_live) push(rax);
+
+ if (obj != noreg && obj != rax)
+ push(obj);
+
+ if (pre_val != rax)
+ push(pre_val);
+
+ // Calling the runtime using the regular call_VM_leaf mechanism generates
+  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
+  // that checks that *(ebp+frame::interpreter_frame_last_sp) == NULL.
+ //
+  // If we are generating the pre-barrier without a frame (e.g. in the
+ // intrinsified Reference.get() routine) then ebp might be pointing to
+ // the caller frame and so this check will most likely fail at runtime.
+ //
+ // Expanding the call directly bypasses the generation of the check.
+  // So when we do not have a full interpreter frame on the stack
+ // expand_call should be passed true.
+
+ NOT_LP64( push(thread); )
+
+ if (expand_call) {
+ LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
+ pass_arg1(this, thread);
+ pass_arg0(this, pre_val);
+ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
+ } else {
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+ }
+
+ NOT_LP64( pop(thread); )
+
+ // save the live input values
+ if (pre_val != rax)
+ pop(pre_val);
+
+ if (obj != noreg && obj != rax)
+ pop(obj);
+
+ if(tosca_live) pop(rax);
+
+ bind(done);
+}
+
+void MacroAssembler::g1_write_barrier_post(Register store_addr,
+ Register new_val,
+ Register thread,
+ Register tmp,
+ Register tmp2) {
+#ifdef _LP64
+ assert(thread == r15_thread, "must be");
+#endif // _LP64
+
+ Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
+ PtrQueue::byte_offset_of_index()));
+ Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
+ PtrQueue::byte_offset_of_buf()));
+
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+ Label done;
+ Label runtime;
+
+ // Does store cross heap regions?
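+  // (Two addresses lie in the same region iff they agree in all bits above
+  //  LogOfHRGrainBytes, so the xor followed by the shift is zero exactly then.)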
+
+ movptr(tmp, store_addr);
+ xorptr(tmp, new_val);
+ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
+ jcc(Assembler::equal, done);
+
+ // crosses regions, storing NULL?
+
+ cmpptr(new_val, (int32_t) NULL_WORD);
+ jcc(Assembler::equal, done);
+
+ // storing region crossing non-NULL, is card already dirty?
+
+ ExternalAddress cardtable((address) ct->byte_map_base);
+ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+#ifdef _LP64
+ const Register card_addr = tmp;
+
+ movq(card_addr, store_addr);
+ shrq(card_addr, CardTableModRefBS::card_shift);
+
+ lea(tmp2, cardtable);
+
+ // get the address of the card
+ addq(card_addr, tmp2);
+#else
+ const Register card_index = tmp;
+
+ movl(card_index, store_addr);
+ shrl(card_index, CardTableModRefBS::card_shift);
+
+ Address index(noreg, card_index, Address::times_1);
+ const Register card_addr = tmp;
+ lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
+#endif
+ cmpb(Address(card_addr, 0), 0);
+ jcc(Assembler::equal, done);
+
+ // storing a region crossing, non-NULL oop, card is clean.
+ // dirty card and log.
+
+ movb(Address(card_addr, 0), 0);
+
+ cmpl(queue_index, 0);
+ jcc(Assembler::equal, runtime);
+ subl(queue_index, wordSize);
+ movptr(tmp2, buffer);
+#ifdef _LP64
+ movslq(rscratch1, queue_index);
+ addq(tmp2, rscratch1);
+ movq(Address(tmp2, 0), card_addr);
+#else
+ addl(tmp2, queue_index);
+ movl(Address(tmp2, 0), card_index);
+#endif
+ jmp(done);
+
+ bind(runtime);
+ // save the live input values
+ push(store_addr);
+ push(new_val);
+#ifdef _LP64
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
+#else
+ push(thread);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+ pop(thread);
+#endif
+ pop(new_val);
+ pop(store_addr);
+
+ bind(done);
+}
+
+#endif // SERIALGC
+//////////////////////////////////////////////////////////////////////////////////
+
+
+void MacroAssembler::store_check(Register obj) {
+ // Does a store check for the oop in register obj. The content of
+ // register obj is destroyed afterwards.
+ store_check_part_1(obj);
+ store_check_part_2(obj);
+}
+
+void MacroAssembler::store_check(Register obj, Address dst) {
+ store_check(obj);
+}
+
+
+// Split the store check operation so that other instructions can be scheduled in between
+void MacroAssembler::store_check_part_1(Register obj) {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+ shrptr(obj, CardTableModRefBS::card_shift);
+}
+
+void MacroAssembler::store_check_part_2(Register obj) {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+ CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+
+ // The calculation for byte_map_base is as follows:
+ // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+ // So this essentially converts an address to a displacement and
+  // it will never need to be relocated. On 64-bit, however, the value may be
+  // too large for a 32-bit displacement.
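+  //
+  // Taken together with store_check_part_1 (which already shifted obj right by
+  // card_shift), the store below is in effect (illustrative sketch only, where
+  // store_address is the original oop address):
+  //   ct->byte_map_base[store_address >> CardTableModRefBS::card_shift] = 0;  // dirty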
+
+ intptr_t disp = (intptr_t) ct->byte_map_base;
+ if (is_simm32(disp)) {
+ Address cardtable(noreg, obj, Address::times_1, disp);
+ movb(cardtable, 0);
+ } else {
+    // By doing it as an ExternalAddress, disp could be converted to a rip-relative
+    // displacement and done in a single instruction given favorable mapping and
+    // a smarter version of as_Address. Worst case it is two instructions, which
+    // is no worse than loading disp into a register and using a simple
+    // Address() as above.
+    // We can't use ExternalAddress as the only style since if disp == 0 we'll
+    // assert because NULL isn't acceptable in a relocation (see 6644928). In any case
+ // in some cases we'll get a single instruction version.
+
+ ExternalAddress cardtable((address)disp);
+ Address index(noreg, obj, Address::times_1);
+ movb(as_Address(ArrayAddress(cardtable, index)), 0);
+ }
+}
+
+void MacroAssembler::subptr(Register dst, int32_t imm32) {
+ LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
+}
+
+// Force generation of a 4 byte immediate value even if it fits into 8 bits
+void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
+ LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
+}
+
+void MacroAssembler::subptr(Register dst, Register src) {
+ LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
+}
+
+// C++ bool manipulation
+void MacroAssembler::testbool(Register dst) {
+ if(sizeof(bool) == 1)
+ testb(dst, 0xff);
+ else if(sizeof(bool) == 2) {
+ // testw implementation needed for two byte bools
+ ShouldNotReachHere();
+ } else if(sizeof(bool) == 4)
+ testl(dst, dst);
+ else
+ // unsupported
+ ShouldNotReachHere();
+}
+
+void MacroAssembler::testptr(Register dst, Register src) {
+ LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
+}
+
+// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
+void MacroAssembler::tlab_allocate(Register obj,
+ Register var_size_in_bytes,
+ int con_size_in_bytes,
+ Register t1,
+ Register t2,
+ Label& slow_case) {
+ assert_different_registers(obj, t1, t2);
+ assert_different_registers(obj, var_size_in_bytes, t1);
+ Register end = t2;
+ Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
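+
+  // In pseudo-code the fast path below is roughly (sketch):
+  //   obj = thread->tlab_top; end = obj + size;
+  //   if (end > thread->tlab_end) goto slow_case; else thread->tlab_top = end;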
+
+ verify_tlab();
+
+ NOT_LP64(get_thread(thread));
+
+ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
+ if (var_size_in_bytes == noreg) {
+ lea(end, Address(obj, con_size_in_bytes));
+ } else {
+ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
+ }
+ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
+ jcc(Assembler::above, slow_case);
+
+ // update the tlab top pointer
+ movptr(Address(thread, JavaThread::tlab_top_offset()), end);
+
+ // recover var_size_in_bytes if necessary
+ if (var_size_in_bytes == end) {
+ subptr(var_size_in_bytes, obj);
+ }
+ verify_tlab();
+}
+
+// Preserves rbx, and rdx.
+Register MacroAssembler::tlab_refill(Label& retry,
+ Label& try_eden,
+ Label& slow_case) {
+ Register top = rax;
+ Register t1 = rcx;
+ Register t2 = rsi;
+ Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+ assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
+ Label do_refill, discard_tlab;
+
+ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ // No allocation in the shared eden.
+ jmp(slow_case);
+ }
+
+ NOT_LP64(get_thread(thread_reg));
+
+ movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
+
+ // calculate amount of free space
+ subptr(t1, top);
+ shrptr(t1, LogHeapWordSize);
+
+ // Retain tlab and allocate object in shared space if
+ // the amount free in the tlab is too large to discard.
+ cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
+ jcc(Assembler::lessEqual, discard_tlab);
+
+ // Retain
+ // %%% yuck as movptr...
+ movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
+ addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
+ if (TLABStats) {
+ // increment number of slow_allocations
+ addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
+ }
+ jmp(try_eden);
+
+ bind(discard_tlab);
+ if (TLABStats) {
+ // increment number of refills
+ addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
+ // accumulate wastage -- t1 is amount free in tlab
+ addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
+ }
+
+ // if tlab is currently allocated (top or end != null) then
+ // fill [top, end + alignment_reserve) with array object
+ testptr(top, top);
+ jcc(Assembler::zero, do_refill);
+
+ // set up the mark word
+ movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
+ // set the length to the remaining space
+ subptr(t1, typeArrayOopDesc::header_size(T_INT));
+ addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
+ shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
+ movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
+ // set klass to intArrayKlass
+ // dubious reloc why not an oop reloc?
+ movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
+  // Store klass last: concurrent GCs assume the length is valid if the
+  // klass field is not null.
+ store_klass(top, t1);
+
+ movptr(t1, top);
+ subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
+ incr_allocated_bytes(thread_reg, t1, 0);
+
+ // refill the tlab with an eden allocation
+ bind(do_refill);
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
+ shlptr(t1, LogHeapWordSize);
+ // allocate new tlab, address returned in top
+ eden_allocate(top, t1, 0, t2, slow_case);
+
+ // Check that t1 was preserved in eden_allocate.
+#ifdef ASSERT
+ if (UseTLAB) {
+ Label ok;
+ Register tsize = rsi;
+ assert_different_registers(tsize, thread_reg, t1);
+ push(tsize);
+ movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
+ shlptr(tsize, LogHeapWordSize);
+ cmpptr(t1, tsize);
+ jcc(Assembler::equal, ok);
+ STOP("assert(t1 != tlab size)");
+ should_not_reach_here();
+
+ bind(ok);
+ pop(tsize);
+ }
+#endif
+ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
+ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
+ addptr(top, t1);
+ subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
+ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
+ verify_tlab();
+ jmp(retry);
+
+ return thread_reg; // for use by caller
+}
+
+void MacroAssembler::incr_allocated_bytes(Register thread,
+ Register var_size_in_bytes,
+ int con_size_in_bytes,
+ Register t1) {
+ if (!thread->is_valid()) {
+#ifdef _LP64
+ thread = r15_thread;
+#else
+ assert(t1->is_valid(), "need temp reg");
+ thread = t1;
+ get_thread(thread);
+#endif
+ }
+
+#ifdef _LP64
+ if (var_size_in_bytes->is_valid()) {
+ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
+ } else {
+ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
+ }
+#else
+ if (var_size_in_bytes->is_valid()) {
+ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
+ } else {
+ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
+ }
+ adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
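+  // (The addl/adcl pair above performs a 64-bit add of the allocated_bytes
+  // counter on 32-bit: add into the low word, then add the carry into the
+  // high word.)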
+#endif
+}
+
+void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
+ pusha();
+
+ // if we are coming from c1, xmm registers may be live
+ int off = 0;
+ if (UseSSE == 1) {
+ subptr(rsp, sizeof(jdouble)*8);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
+ movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
+ } else if (UseSSE >= 2) {
+#ifdef COMPILER2
+ if (MaxVectorSize > 16) {
+ assert(UseAVX > 0, "256bit vectors are supported only with AVX");
+      // Save the upper half of the YMM registers
+ subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
+ vextractf128h(Address(rsp, 0),xmm0);
+ vextractf128h(Address(rsp, 16),xmm1);
+ vextractf128h(Address(rsp, 32),xmm2);
+ vextractf128h(Address(rsp, 48),xmm3);
+ vextractf128h(Address(rsp, 64),xmm4);
+ vextractf128h(Address(rsp, 80),xmm5);
+ vextractf128h(Address(rsp, 96),xmm6);
+ vextractf128h(Address(rsp,112),xmm7);
+#ifdef _LP64
+ vextractf128h(Address(rsp,128),xmm8);
+ vextractf128h(Address(rsp,144),xmm9);
+ vextractf128h(Address(rsp,160),xmm10);
+ vextractf128h(Address(rsp,176),xmm11);
+ vextractf128h(Address(rsp,192),xmm12);
+ vextractf128h(Address(rsp,208),xmm13);
+ vextractf128h(Address(rsp,224),xmm14);
+ vextractf128h(Address(rsp,240),xmm15);
+#endif
+ }
+#endif
+    // Save whole 128-bit (16 bytes) XMM registers
+ subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
+ movdqu(Address(rsp,off++*16),xmm0);
+ movdqu(Address(rsp,off++*16),xmm1);
+ movdqu(Address(rsp,off++*16),xmm2);
+ movdqu(Address(rsp,off++*16),xmm3);
+ movdqu(Address(rsp,off++*16),xmm4);
+ movdqu(Address(rsp,off++*16),xmm5);
+ movdqu(Address(rsp,off++*16),xmm6);
+ movdqu(Address(rsp,off++*16),xmm7);
+#ifdef _LP64
+ movdqu(Address(rsp,off++*16),xmm8);
+ movdqu(Address(rsp,off++*16),xmm9);
+ movdqu(Address(rsp,off++*16),xmm10);
+ movdqu(Address(rsp,off++*16),xmm11);
+ movdqu(Address(rsp,off++*16),xmm12);
+ movdqu(Address(rsp,off++*16),xmm13);
+ movdqu(Address(rsp,off++*16),xmm14);
+ movdqu(Address(rsp,off++*16),xmm15);
+#endif
+ }
+
+ // Preserve registers across runtime call
+ int incoming_argument_and_return_value_offset = -1;
+ if (num_fpu_regs_in_use > 1) {
+ // Must preserve all other FPU regs (could alternatively convert
+ // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
+ // FPU state, but can not trust C compiler)
+ NEEDS_CLEANUP;
+ // NOTE that in this case we also push the incoming argument(s) to
+ // the stack and restore it later; we also use this stack slot to
+ // hold the return value from dsin, dcos etc.
+ for (int i = 0; i < num_fpu_regs_in_use; i++) {
+ subptr(rsp, sizeof(jdouble));
+ fstp_d(Address(rsp, 0));
+ }
+ incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
+ for (int i = nb_args-1; i >= 0; i--) {
+ fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
+ }
+ }
+
+ subptr(rsp, nb_args*sizeof(jdouble));
+ for (int i = 0; i < nb_args; i++) {
+ fstp_d(Address(rsp, i*sizeof(jdouble)));
+ }
+
+#ifdef _LP64
+ if (nb_args > 0) {
+ movdbl(xmm0, Address(rsp, 0));
+ }
+ if (nb_args > 1) {
+ movdbl(xmm1, Address(rsp, sizeof(jdouble)));
+ }
+ assert(nb_args <= 2, "unsupported number of args");
+#endif // _LP64
+
+ // NOTE: we must not use call_VM_leaf here because that requires a
+ // complete interpreter frame in debug mode -- same bug as 4387334
+ // MacroAssembler::call_VM_leaf_base is perfectly safe and will
+  // do the proper 64-bit ABI
+
+ NEEDS_CLEANUP;
+ // Need to add stack banging before this runtime call if it needs to
+ // be taken; however, there is no generic stack banging routine at
+ // the MacroAssembler level
+
+ MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
+
+#ifdef _LP64
+ movsd(Address(rsp, 0), xmm0);
+ fld_d(Address(rsp, 0));
+#endif // _LP64
+ addptr(rsp, sizeof(jdouble) * nb_args);
+ if (num_fpu_regs_in_use > 1) {
+ // Must save return value to stack and then restore entire FPU
+ // stack except incoming arguments
+ fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
+ for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
+ fld_d(Address(rsp, 0));
+ addptr(rsp, sizeof(jdouble));
+ }
+ fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
+ addptr(rsp, sizeof(jdouble) * nb_args);
+ }
+
+ off = 0;
+ if (UseSSE == 1) {
+ movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
+ movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
+ addptr(rsp, sizeof(jdouble)*8);
+ } else if (UseSSE >= 2) {
+    // Restore whole 128-bit (16 bytes) XMM registers
+ movdqu(xmm0, Address(rsp,off++*16));
+ movdqu(xmm1, Address(rsp,off++*16));
+ movdqu(xmm2, Address(rsp,off++*16));
+ movdqu(xmm3, Address(rsp,off++*16));
+ movdqu(xmm4, Address(rsp,off++*16));
+ movdqu(xmm5, Address(rsp,off++*16));
+ movdqu(xmm6, Address(rsp,off++*16));
+ movdqu(xmm7, Address(rsp,off++*16));
+#ifdef _LP64
+ movdqu(xmm8, Address(rsp,off++*16));
+ movdqu(xmm9, Address(rsp,off++*16));
+ movdqu(xmm10, Address(rsp,off++*16));
+ movdqu(xmm11, Address(rsp,off++*16));
+ movdqu(xmm12, Address(rsp,off++*16));
+ movdqu(xmm13, Address(rsp,off++*16));
+ movdqu(xmm14, Address(rsp,off++*16));
+ movdqu(xmm15, Address(rsp,off++*16));
+#endif
+ addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
+#ifdef COMPILER2
+ if (MaxVectorSize > 16) {
+      // Restore the upper half of the YMM registers.
+ vinsertf128h(xmm0, Address(rsp, 0));
+ vinsertf128h(xmm1, Address(rsp, 16));
+ vinsertf128h(xmm2, Address(rsp, 32));
+ vinsertf128h(xmm3, Address(rsp, 48));
+ vinsertf128h(xmm4, Address(rsp, 64));
+ vinsertf128h(xmm5, Address(rsp, 80));
+ vinsertf128h(xmm6, Address(rsp, 96));
+ vinsertf128h(xmm7, Address(rsp,112));
+#ifdef _LP64
+ vinsertf128h(xmm8, Address(rsp,128));
+ vinsertf128h(xmm9, Address(rsp,144));
+ vinsertf128h(xmm10, Address(rsp,160));
+ vinsertf128h(xmm11, Address(rsp,176));
+ vinsertf128h(xmm12, Address(rsp,192));
+ vinsertf128h(xmm13, Address(rsp,208));
+ vinsertf128h(xmm14, Address(rsp,224));
+ vinsertf128h(xmm15, Address(rsp,240));
+#endif
+ addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
+ }
+#endif
+ }
+ popa();
+}
+
+static const double pi_4 = 0.7853981633974483;
+
+void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
+ // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
+ // was attempted in this code; unfortunately it appears that the
+ // switch to 80-bit precision and back causes this to be
+ // unprofitable compared with simply performing a runtime call if
+ // the argument is out of the (-pi/4, pi/4) range.
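+  //
+  // For example (sketch): with x = 0.5 (|x| <= pi/4 ~= 0.785398) the fsin/fcos/
+  // ftan below is used directly; with x = 1.5 the compare branches to slow_case,
+  // which calls SharedRuntime::dsin/dcos/dtan via fp_runtime_fallback.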
+
+ Register tmp = noreg;
+ if (!VM_Version::supports_cmov()) {
+ // fcmp needs a temporary so preserve rbx,
+ tmp = rbx;
+ push(tmp);
+ }
+
+ Label slow_case, done;
+
+ ExternalAddress pi4_adr = (address)&pi_4;
+ if (reachable(pi4_adr)) {
+ // x ?<= pi/4
+ fld_d(pi4_adr);
+ fld_s(1); // Stack: X PI/4 X
+ fabs(); // Stack: |X| PI/4 X
+ fcmp(tmp);
+ jcc(Assembler::above, slow_case);
+
+ // fastest case: -pi/4 <= x <= pi/4
+ switch(trig) {
+ case 's':
+ fsin();
+ break;
+ case 'c':
+ fcos();
+ break;
+ case 't':
+ ftan();
+ break;
+ default:
+ assert(false, "bad intrinsic");
+ break;
+ }
+ jmp(done);
+ }
+
+ // slow case: runtime call
+ bind(slow_case);
+
+ switch(trig) {
+ case 's':
+ {
+ fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
+ }
+ break;
+ case 'c':
+ {
+ fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
+ }
+ break;
+ case 't':
+ {
+ fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
+ }
+ break;
+ default:
+ assert(false, "bad intrinsic");
+ break;
+ }
+
+ // Come here with result in F-TOS
+ bind(done);
+
+ if (tmp != noreg) {
+ pop(tmp);
+ }
+}
+
+
+// Look up the method for a megamorphic invokeinterface call.
+// The target method is determined by <intf_klass, itable_index>.
+// The receiver klass is in recv_klass.
+// On success, the result will be in method_result, and execution falls through.
+// On failure, execution transfers to the given label.
+void MacroAssembler::lookup_interface_method(Register recv_klass,
+ Register intf_klass,
+ RegisterOrConstant itable_index,
+ Register method_result,
+ Register scan_temp,
+ Label& L_no_such_interface) {
+ assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
+ assert(itable_index.is_constant() || itable_index.as_register() == method_result,
+ "caller must use same register for non-constant itable index as for method");
+
+ // Compute start of first itableOffsetEntry (which is at the end of the vtable)
+ int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+ int itentry_off = itableMethodEntry::method_offset_in_bytes();
+ int scan_step = itableOffsetEntry::size() * wordSize;
+ int vte_size = vtableEntry::size() * wordSize;
+ Address::ScaleFactor times_vte_scale = Address::times_ptr;
+ assert(vte_size == wordSize, "else adjust times_vte_scale");
+
+ movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
+
+ // %%% Could store the aligned, prescaled offset in the klassoop.
+ lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
+ if (HeapWordsPerLong > 1) {
+ // Round up to align_object_offset boundary
+ // see code for InstanceKlass::start_of_itable!
+ round_to(scan_temp, BytesPerLong);
+ }
+
+ // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+ assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+ lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
+
+ // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
+ // if (scan->interface() == intf) {
+ // result = (klass + scan->offset() + itable_index);
+ // }
+ // }
+ Label search, found_method;
+
+ for (int peel = 1; peel >= 0; peel--) {
+ movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
+ cmpptr(intf_klass, method_result);
+
+ if (peel) {
+ jccb(Assembler::equal, found_method);
+ } else {
+ jccb(Assembler::notEqual, search);
+ // (invert the test to fall through to found_method...)
+ }
+
+ if (!peel) break;
+
+ bind(search);
+
+ // Check that the previous entry is non-null. A null entry means that
+ // the receiver class doesn't implement the interface, and wasn't the
+ // same as when the caller was compiled.
+ testptr(method_result, method_result);
+ jcc(Assembler::zero, L_no_such_interface);
+ addptr(scan_temp, scan_step);
+ }
+
+ bind(found_method);
+
+ // Got a hit.
+ movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
+ movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
+}
+
+
+// virtual method calling
+void MacroAssembler::lookup_virtual_method(Register recv_klass,
+ RegisterOrConstant vtable_index,
+ Register method_result) {
+ const int base = InstanceKlass::vtable_start_offset() * wordSize;
+ assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
+ Address vtable_entry_addr(recv_klass,
+ vtable_index, Address::times_ptr,
+ base + vtableEntry::method_offset_in_bytes());
+ movptr(method_result, vtable_entry_addr);
+}
+
+
+void MacroAssembler::check_klass_subtype(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Label& L_success) {
+ Label L_failure;
+ check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
+ check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
+ bind(L_failure);
+}
+
+
+void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Label* L_success,
+ Label* L_failure,
+ Label* L_slow_path,
+ RegisterOrConstant super_check_offset) {
+ assert_different_registers(sub_klass, super_klass, temp_reg);
+ bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
+ if (super_check_offset.is_register()) {
+ assert_different_registers(sub_klass, super_klass,
+ super_check_offset.as_register());
+ } else if (must_load_sco) {
+ assert(temp_reg != noreg, "supply either a temp or a register offset");
+ }
+
+ Label L_fallthrough;
+ int label_nulls = 0;
+ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
+ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
+ if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
+ assert(label_nulls <= 1, "at most one NULL in the batch");
+
+ int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
+ int sco_offset = in_bytes(Klass::super_check_offset_offset());
+ Address super_check_offset_addr(super_klass, sco_offset);
+
+ // Hacked jcc, which "knows" that L_fallthrough, at least, is in
+ // range of a jccb. If this routine grows larger, reconsider at
+ // least some of these.
+#define local_jcc(assembler_cond, label) \
+ if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
+ else jcc( assembler_cond, label) /*omit semi*/
+
+ // Hacked jmp, which may only be used just before L_fallthrough.
+#define final_jmp(label) \
+ if (&(label) == &L_fallthrough) { /*do nothing*/ } \
+ else jmp(label) /*omit semi*/
+
+ // If the pointers are equal, we are done (e.g., String[] elements).
+ // This self-check enables sharing of secondary supertype arrays among
+ // non-primary types such as array-of-interface. Otherwise, each such
+ // type would need its own customized SSA.
+ // We move this check to the front of the fast path because many
+ // type checks are in fact trivially successful in this manner,
+ // so we get a nicely predicted branch right at the start of the check.
+ cmpptr(sub_klass, super_klass);
+ local_jcc(Assembler::equal, *L_success);
+
+ // Check the supertype display:
+ if (must_load_sco) {
+ // Positive movl does right thing on LP64.
+ movl(temp_reg, super_check_offset_addr);
+ super_check_offset = RegisterOrConstant(temp_reg);
+ }
+ Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
+ cmpptr(super_klass, super_check_addr); // load displayed supertype
+
+ // This check has worked decisively for primary supers.
+ // Secondary supers are sought in the super_cache ('super_cache_addr').
+ // (Secondary supers are interfaces and very deeply nested subtypes.)
+ // This works in the same check above because of a tricky aliasing
+ // between the super_cache and the primary super display elements.
+ // (The 'super_check_addr' can address either, as the case requires.)
+ // Note that the cache is updated below if it does not help us find
+ // what we need immediately.
+ // So if it was a primary super, we can just fail immediately.
+ // Otherwise, it's the slow path for us (no success at this point).
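+  // In other words (a sketch of the branches below): when the probe read the
+  // cache slot (super_check_offset == sc_offset), a miss only means "not yet
+  // known" and we take the slow path; otherwise a miss is a definitive failure.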
+
+ if (super_check_offset.is_register()) {
+ local_jcc(Assembler::equal, *L_success);
+ cmpl(super_check_offset.as_register(), sc_offset);
+ if (L_failure == &L_fallthrough) {
+ local_jcc(Assembler::equal, *L_slow_path);
+ } else {
+ local_jcc(Assembler::notEqual, *L_failure);
+ final_jmp(*L_slow_path);
+ }
+ } else if (super_check_offset.as_constant() == sc_offset) {
+ // Need a slow path; fast failure is impossible.
+ if (L_slow_path == &L_fallthrough) {
+ local_jcc(Assembler::equal, *L_success);
+ } else {
+ local_jcc(Assembler::notEqual, *L_slow_path);
+ final_jmp(*L_success);
+ }
+ } else {
+ // No slow path; it's a fast decision.
+ if (L_failure == &L_fallthrough) {
+ local_jcc(Assembler::equal, *L_success);
+ } else {
+ local_jcc(Assembler::notEqual, *L_failure);
+ final_jmp(*L_success);
+ }
+ }
+
+ bind(L_fallthrough);
+
+#undef local_jcc
+#undef final_jmp
+}
+
+
+void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label* L_success,
+ Label* L_failure,
+ bool set_cond_codes) {
+ assert_different_registers(sub_klass, super_klass, temp_reg);
+ if (temp2_reg != noreg)
+ assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
+#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
+
+ Label L_fallthrough;
+ int label_nulls = 0;
+ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
+ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
+ assert(label_nulls <= 1, "at most one NULL in the batch");
+
+ // a couple of useful fields in sub_klass:
+ int ss_offset = in_bytes(Klass::secondary_supers_offset());
+ int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
+ Address secondary_supers_addr(sub_klass, ss_offset);
+ Address super_cache_addr( sub_klass, sc_offset);
+
+ // Do a linear scan of the secondary super-klass chain.
+ // This code is rarely used, so simplicity is a virtue here.
+ // The repne_scan instruction uses fixed registers, which we must spill.
+ // Don't worry too much about pre-existing connections with the input regs.
+
+ assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
+ assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
+
+ // Get super_klass value into rax (even if it was in rdi or rcx).
+ bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
+ if (super_klass != rax || UseCompressedOops) {
+ if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
+ mov(rax, super_klass);
+ }
+ if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
+ if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
+
+#ifndef PRODUCT
+ int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
+ ExternalAddress pst_counter_addr((address) pst_counter);
+ NOT_LP64( incrementl(pst_counter_addr) );
+ LP64_ONLY( lea(rcx, pst_counter_addr) );
+ LP64_ONLY( incrementl(Address(rcx, 0)) );
+#endif //PRODUCT
+
+ // We will consult the secondary-super array.
+ movptr(rdi, secondary_supers_addr);
+ // Load the array length. (Positive movl does right thing on LP64.)
+ movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
+ // Skip to start of data.
+ addptr(rdi, Array<Klass*>::base_offset_in_bytes());
+
+ // Scan RCX words at [RDI] for an occurrence of RAX.
+ // Set NZ/Z based on last compare.
+ // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
+ // not change flags (only scas instruction which is repeated sets flags).
+ // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
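+  //
+  // Roughly equivalent pseudo-code for repne_scan (sketch only):
+  //   while (rcx != 0) { zf = (rax == *(intptr_t*)rdi); rdi += wordSize; rcx--;
+  //                      if (zf) break; }
+  // so afterwards ZF == 1 iff super_klass was found among the secondary supers.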
+
+ testptr(rax,rax); // Set Z = 0
+ repne_scan();
+
+ // Unspill the temp. registers:
+ if (pushed_rdi) pop(rdi);
+ if (pushed_rcx) pop(rcx);
+ if (pushed_rax) pop(rax);
+
+ if (set_cond_codes) {
+ // Special hack for the AD files: rdi is guaranteed non-zero.
+ assert(!pushed_rdi, "rdi must be left non-NULL");
+ // Also, the condition codes are properly set Z/NZ on succeed/failure.
+ }
+
+ if (L_failure == &L_fallthrough)
+ jccb(Assembler::notEqual, *L_failure);
+ else jcc(Assembler::notEqual, *L_failure);
+
+ // Success. Cache the super we found and proceed in triumph.
+ movptr(super_cache_addr, super_klass);
+
+ if (L_success != &L_fallthrough) {
+ jmp(*L_success);
+ }
+
+#undef IS_A_TEMP
+
+ bind(L_fallthrough);
+}
+
+
+void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
+ if (VM_Version::supports_cmov()) {
+ cmovl(cc, dst, src);
+ } else {
+ Label L;
+ jccb(negate_condition(cc), L);
+ movl(dst, src);
+ bind(L);
+ }
+}
+
+void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
+ if (VM_Version::supports_cmov()) {
+ cmovl(cc, dst, src);
+ } else {
+ Label L;
+ jccb(negate_condition(cc), L);
+ movl(dst, src);
+ bind(L);
+ }
+}
+
+void MacroAssembler::verify_oop(Register reg, const char* s) {
+ if (!VerifyOops) return;
+
+ // Pass register number to verify_oop_subroutine
+ char* b = new char[strlen(s) + 50];
+ sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+ BLOCK_COMMENT("verify_oop {");
+#ifdef _LP64
+ push(rscratch1); // save r10, trashed by movptr()
+#endif
+ push(rax); // save rax,
+ push(reg); // pass register argument
+ ExternalAddress buffer((address) b);
+ // avoid using pushptr, as it modifies scratch registers
+ // and our contract is not to modify anything
+ movptr(rax, buffer.addr());
+ push(rax);
+ // call indirectly to solve generation ordering problem
+ movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
+ call(rax);
+ // Caller pops the arguments (oop, message) and restores rax, r10
+ BLOCK_COMMENT("} verify_oop");
+}
+
+
+RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
+ Register tmp,
+ int offset) {
+ intptr_t value = *delayed_value_addr;
+ if (value != 0)
+ return RegisterOrConstant(value + offset);
+
+ // load indirectly to solve generation ordering problem
+ movptr(tmp, ExternalAddress((address) delayed_value_addr));
+
+#ifdef ASSERT
+ { Label L;
+ testptr(tmp, tmp);
+ if (WizardMode) {
+ jcc(Assembler::notZero, L);
+ char* buf = new char[40];
+ sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
+ STOP(buf);
+ } else {
+ jccb(Assembler::notZero, L);
+ hlt();
+ }
+ bind(L);
+ }
+#endif
+
+ if (offset != 0)
+ addptr(tmp, offset);
+
+ return RegisterOrConstant(tmp);
+}
+
+
+Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+ int extra_slot_offset) {
+ // cf. TemplateTable::prepare_invoke(), if (load_receiver).
+ int stackElementSize = Interpreter::stackElementSize;
+ int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
+#ifdef ASSERT
+ int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
+ assert(offset1 - offset == stackElementSize, "correct arithmetic");
+#endif
+ Register scale_reg = noreg;
+ Address::ScaleFactor scale_factor = Address::no_scale;
+ if (arg_slot.is_constant()) {
+ offset += arg_slot.as_constant() * stackElementSize;
+ } else {
+ scale_reg = arg_slot.as_register();
+ scale_factor = Address::times(stackElementSize);
+ }
+ offset += wordSize; // return PC is on stack
+ return Address(rsp, scale_reg, scale_factor, offset);
+}
+
+
+void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
+ if (!VerifyOops) return;
+
+ // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
+ // Pass register number to verify_oop_subroutine
+ char* b = new char[strlen(s) + 50];
+ sprintf(b, "verify_oop_addr: %s", s);
+
+#ifdef _LP64
+ push(rscratch1); // save r10, trashed by movptr()
+#endif
+ push(rax); // save rax,
+ // addr may contain rsp so we will have to adjust it based on the push
+ // we just did (and on 64 bit we do two pushes)
+ // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
+ // stores rax into addr which is backwards of what was intended.
+ if (addr.uses(rsp)) {
+ lea(rax, addr);
+ pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
+ } else {
+ pushptr(addr);
+ }
+
+ ExternalAddress buffer((address) b);
+ // pass msg argument
+ // avoid using pushptr, as it modifies scratch registers
+ // and our contract is not to modify anything
+ movptr(rax, buffer.addr());
+ push(rax);
+
+ // call indirectly to solve generation ordering problem
+ movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
+ call(rax);
+ // Caller pops the arguments (addr, message) and restores rax, r10.
+}
+
+void MacroAssembler::verify_tlab() {
+#ifdef ASSERT
+ if (UseTLAB && VerifyOops) {
+ Label next, ok;
+ Register t1 = rsi;
+ Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
+
+ push(t1);
+ NOT_LP64(push(thread_reg));
+ NOT_LP64(get_thread(thread_reg));
+
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
+ cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
+ jcc(Assembler::aboveEqual, next);
+ STOP("assert(top >= start)");
+ should_not_reach_here();
+
+ bind(next);
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
+ cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
+ jcc(Assembler::aboveEqual, ok);
+ STOP("assert(top <= end)");
+ should_not_reach_here();
+
+ bind(ok);
+ NOT_LP64(pop(thread_reg));
+ pop(t1);
+ }
+#endif
+}
+
+class ControlWord {
+ public:
+ int32_t _value;
+
+ int rounding_control() const { return (_value >> 10) & 3 ; }
+ int precision_control() const { return (_value >> 8) & 3 ; }
+ bool precision() const { return ((_value >> 5) & 1) != 0; }
+ bool underflow() const { return ((_value >> 4) & 1) != 0; }
+ bool overflow() const { return ((_value >> 3) & 1) != 0; }
+ bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
+ bool denormalized() const { return ((_value >> 1) & 1) != 0; }
+ bool invalid() const { return ((_value >> 0) & 1) != 0; }
+
+ void print() const {
+ // rounding control
+ const char* rc;
+ switch (rounding_control()) {
+ case 0: rc = "round near"; break;
+ case 1: rc = "round down"; break;
+ case 2: rc = "round up "; break;
+ case 3: rc = "chop "; break;
+ };
+ // precision control
+ const char* pc;
+ switch (precision_control()) {
+ case 0: pc = "24 bits "; break;
+ case 1: pc = "reserved"; break;
+ case 2: pc = "53 bits "; break;
+ case 3: pc = "64 bits "; break;
+ };
+ // flags
+ char f[9];
+ f[0] = ' ';
+ f[1] = ' ';
+ f[2] = (precision ()) ? 'P' : 'p';
+ f[3] = (underflow ()) ? 'U' : 'u';
+ f[4] = (overflow ()) ? 'O' : 'o';
+ f[5] = (zero_divide ()) ? 'Z' : 'z';
+ f[6] = (denormalized()) ? 'D' : 'd';
+ f[7] = (invalid ()) ? 'I' : 'i';
+ f[8] = '\x0';
+ // output
+ printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
+ }
+
+};
+
+class StatusWord {
+ public:
+ int32_t _value;
+
+ bool busy() const { return ((_value >> 15) & 1) != 0; }
+ bool C3() const { return ((_value >> 14) & 1) != 0; }
+ bool C2() const { return ((_value >> 10) & 1) != 0; }
+ bool C1() const { return ((_value >> 9) & 1) != 0; }
+ bool C0() const { return ((_value >> 8) & 1) != 0; }
+ int top() const { return (_value >> 11) & 7 ; }
+ bool error_status() const { return ((_value >> 7) & 1) != 0; }
+ bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
+ bool precision() const { return ((_value >> 5) & 1) != 0; }
+ bool underflow() const { return ((_value >> 4) & 1) != 0; }
+ bool overflow() const { return ((_value >> 3) & 1) != 0; }
+ bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
+ bool denormalized() const { return ((_value >> 1) & 1) != 0; }
+ bool invalid() const { return ((_value >> 0) & 1) != 0; }
+
+ void print() const {
+ // condition codes
+ char c[5];
+ c[0] = (C3()) ? '3' : '-';
+ c[1] = (C2()) ? '2' : '-';
+ c[2] = (C1()) ? '1' : '-';
+ c[3] = (C0()) ? '0' : '-';
+ c[4] = '\x0';
+ // flags
+ char f[9];
+ f[0] = (error_status()) ? 'E' : '-';
+ f[1] = (stack_fault ()) ? 'S' : '-';
+ f[2] = (precision ()) ? 'P' : '-';
+ f[3] = (underflow ()) ? 'U' : '-';
+ f[4] = (overflow ()) ? 'O' : '-';
+ f[5] = (zero_divide ()) ? 'Z' : '-';
+ f[6] = (denormalized()) ? 'D' : '-';
+ f[7] = (invalid ()) ? 'I' : '-';
+ f[8] = '\x0';
+ // output
+ printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
+ }
+
+};
+
+class TagWord {
+ public:
+ int32_t _value;
+
+ int tag_at(int i) const { return (_value >> (i*2)) & 3; }
+
+ void print() const {
+ printf("%04x", _value & 0xFFFF);
+ }
+
+};
+
+class FPU_Register {
+ public:
+ int32_t _m0;
+ int32_t _m1;
+ int16_t _ex;
+
+ bool is_indefinite() const {
+ return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
+ }
+
+ void print() const {
+ char sign = (_ex < 0) ? '-' : '+';
+ const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
+ printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
+ };
+
+};
+
+class FPU_State {
+ public:
+ enum {
+ register_size = 10,
+ number_of_registers = 8,
+ register_mask = 7
+ };
+
+ ControlWord _control_word;
+ StatusWord _status_word;
+ TagWord _tag_word;
+ int32_t _error_offset;
+ int32_t _error_selector;
+ int32_t _data_offset;
+ int32_t _data_selector;
+ int8_t _register[register_size * number_of_registers];
+
+ int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
+ FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
+
+ const char* tag_as_string(int tag) const {
+ switch (tag) {
+ case 0: return "valid";
+ case 1: return "zero";
+ case 2: return "special";
+ case 3: return "empty";
+ }
+ ShouldNotReachHere();
+ return NULL;
+ }
+
+ void print() const {
+ // print computation registers
+ { int t = _status_word.top();
+ for (int i = 0; i < number_of_registers; i++) {
+ int j = (i - t) & register_mask;
+ printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
+ st(j)->print();
+ printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
+ }
+ }
+ printf("\n");
+ // print control registers
+ printf("ctrl = "); _control_word.print(); printf("\n");
+ printf("stat = "); _status_word .print(); printf("\n");
+ printf("tags = "); _tag_word .print(); printf("\n");
+ }
+
+};
+
+class Flag_Register {
+ public:
+ int32_t _value;
+
+ bool overflow() const { return ((_value >> 11) & 1) != 0; }
+ bool direction() const { return ((_value >> 10) & 1) != 0; }
+ bool sign() const { return ((_value >> 7) & 1) != 0; }
+ bool zero() const { return ((_value >> 6) & 1) != 0; }
+ bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
+ bool parity() const { return ((_value >> 2) & 1) != 0; }
+ bool carry() const { return ((_value >> 0) & 1) != 0; }
+
+ void print() const {
+ // flags
+ char f[8];
+ f[0] = (overflow ()) ? 'O' : '-';
+ f[1] = (direction ()) ? 'D' : '-';
+ f[2] = (sign ()) ? 'S' : '-';
+ f[3] = (zero ()) ? 'Z' : '-';
+ f[4] = (auxiliary_carry()) ? 'A' : '-';
+ f[5] = (parity ()) ? 'P' : '-';
+ f[6] = (carry ()) ? 'C' : '-';
+ f[7] = '\x0';
+ // output
+ printf("%08x flags = %s", _value, f);
+ }
+
+};
+
+class IU_Register {
+ public:
+ int32_t _value;
+
+ void print() const {
+ printf("%08x %11d", _value, _value);
+ }
+
+};
+
+class IU_State {
+ public:
+ Flag_Register _eflags;
+ IU_Register _rdi;
+ IU_Register _rsi;
+ IU_Register _rbp;
+ IU_Register _rsp;
+ IU_Register _rbx;
+ IU_Register _rdx;
+ IU_Register _rcx;
+ IU_Register _rax;
+
+ void print() const {
+ // computation registers
+ printf("rax, = "); _rax.print(); printf("\n");
+ printf("rbx, = "); _rbx.print(); printf("\n");
+ printf("rcx = "); _rcx.print(); printf("\n");
+ printf("rdx = "); _rdx.print(); printf("\n");
+ printf("rdi = "); _rdi.print(); printf("\n");
+ printf("rsi = "); _rsi.print(); printf("\n");
+ printf("rbp, = "); _rbp.print(); printf("\n");
+ printf("rsp = "); _rsp.print(); printf("\n");
+ printf("\n");
+ // control registers
+ printf("flgs = "); _eflags.print(); printf("\n");
+ }
+};
+
+
+class CPU_State {
+ public:
+ FPU_State _fpu_state;
+ IU_State _iu_state;
+
+ void print() const {
+ printf("--------------------------------------------------\n");
+ _iu_state .print();
+ printf("\n");
+ _fpu_state.print();
+ printf("--------------------------------------------------\n");
+ }
+
+};
+
+
+static void _print_CPU_state(CPU_State* state) {
+ state->print();
+};
+
+
+void MacroAssembler::print_CPU_state() {
+ push_CPU_state();
+ push(rsp); // pass CPU state
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
+ addptr(rsp, wordSize); // discard argument
+ pop_CPU_state();
+}
+
+
+static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
+ static int counter = 0;
+ FPU_State* fs = &state->_fpu_state;
+ counter++;
+ // For leaf calls, only verify that the top few elements remain empty.
+ // We only need 1 empty at the top for C2 code.
+ if( stack_depth < 0 ) {
+ if( fs->tag_for_st(7) != 3 ) {
+ printf("FPR7 not empty\n");
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ return true; // All other stack states do not matter
+ }
+
+ assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
+ "bad FPU control word");
+
+ // compute stack depth
+ int i = 0;
+ while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
+ int d = i;
+ while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
+ // verify findings
+ if (i != FPU_State::number_of_registers) {
+ // stack not contiguous
+ printf("%s: stack not contiguous at ST%d\n", s, i);
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ // check if computed stack depth corresponds to expected stack depth
+ if (stack_depth < 0) {
+ // expected stack depth is -stack_depth or less
+ if (d > -stack_depth) {
+ // too many elements on the stack
+ printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ } else {
+ // expected stack depth is stack_depth
+ if (d != stack_depth) {
+ // wrong stack depth
+ printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ }
+ // everything is cool
+ return true;
+}
+
+
+void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
+ if (!VerifyFPU) return;
+ push_CPU_state();
+ push(rsp); // pass CPU state
+ ExternalAddress msg((address) s);
+ // pass message string s
+ pushptr(msg.addr());
+ push(stack_depth); // pass stack depth
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
+ addptr(rsp, 3 * wordSize); // discard arguments
+ // check for error
+ { Label L;
+ testl(rax, rax);
+ jcc(Assembler::notZero, L);
+ int3(); // break if error condition
+ bind(L);
+ }
+ pop_CPU_state();
+}
+
+void MacroAssembler::load_klass(Register dst, Register src) {
+#ifdef _LP64
+ if (UseCompressedKlassPointers) {
+ movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ decode_klass_not_null(dst);
+ } else
+#endif
+ movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+}
+
+void MacroAssembler::load_prototype_header(Register dst, Register src) {
+#ifdef _LP64
+ if (UseCompressedKlassPointers) {
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ if (Universe::narrow_klass_shift() != 0) {
+ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+ movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
+ } else {
+ movq(dst, Address(dst, Klass::prototype_header_offset()));
+ }
+ } else
+#endif
+ {
+ movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ movptr(dst, Address(dst, Klass::prototype_header_offset()));
+ }
+}
+
+void MacroAssembler::store_klass(Register dst, Register src) {
+#ifdef _LP64
+ if (UseCompressedKlassPointers) {
+ encode_klass_not_null(src);
+ movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+ } else
+#endif
+ movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
+}
+
+void MacroAssembler::load_heap_oop(Register dst, Address src) {
+#ifdef _LP64
+ // FIXME: Must change all places where we try to load the klass.
+ if (UseCompressedOops) {
+ movl(dst, src);
+ decode_heap_oop(dst);
+ } else
+#endif
+ movptr(dst, src);
+}
+
+// Doesn't do verification, generates fixed size code
+void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
+#ifdef _LP64
+ if (UseCompressedOops) {
+ movl(dst, src);
+ decode_heap_oop_not_null(dst);
+ } else
+#endif
+ movptr(dst, src);
+}
+
+void MacroAssembler::store_heap_oop(Address dst, Register src) {
+#ifdef _LP64
+ if (UseCompressedOops) {
+ assert(!dst.uses(src), "not enough registers");
+ encode_heap_oop(src);
+ movl(dst, src);
+ } else
+#endif
+ movptr(dst, src);
+}
+
+void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
+ assert_different_registers(src1, tmp);
+#ifdef _LP64
+ if (UseCompressedOops) {
+ bool did_push = false;
+ if (tmp == noreg) {
+ tmp = rax;
+ push(tmp);
+ did_push = true;
+ assert(!src2.uses(rsp), "can't push");
+ }
+ load_heap_oop(tmp, src2);
+ cmpptr(src1, tmp);
+ if (did_push) pop(tmp);
+ } else
+#endif
+ cmpptr(src1, src2);
+}
+
+// Used for storing NULLs.
+void MacroAssembler::store_heap_oop_null(Address dst) {
+#ifdef _LP64
+ if (UseCompressedOops) {
+ movl(dst, (int32_t)NULL_WORD);
+ } else {
+ movslq(dst, (int32_t)NULL_WORD);
+ }
+#else
+ movl(dst, (int32_t)NULL_WORD);
+#endif
+}
+
+#ifdef _LP64
+void MacroAssembler::store_klass_gap(Register dst, Register src) {
+ if (UseCompressedKlassPointers) {
+ // Store to klass gap in destination
+ movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
+ }
+}
+
+#ifdef ASSERT
+void MacroAssembler::verify_heapbase(const char* msg) {
+ assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ if (CheckCompressedOops) {
+ Label ok;
+ push(rscratch1); // cmpptr trashes rscratch1
+ cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+ jcc(Assembler::equal, ok);
+ STOP(msg);
+ bind(ok);
+ pop(rscratch1);
+ }
+}
+#endif
+
+// Algorithm must match oop.inline.hpp encode_heap_oop.
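+//
+// In C terms the encoding below is roughly (sketch only):
+//   narrow = (oop == NULL) ? 0 : (oop - narrow_oop_base) >> narrow_oop_shift;
+// with the subtract/shift skipped when the base is NULL / the shift is 0.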
+void MacroAssembler::encode_heap_oop(Register r) {
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
+#endif
+ verify_oop(r, "broken oop in encode_heap_oop");
+ if (Universe::narrow_oop_base() == NULL) {
+ if (Universe::narrow_oop_shift() != 0) {
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ shrq(r, LogMinObjAlignmentInBytes);
+ }
+ return;
+ }
+ testq(r, r);
+ cmovq(Assembler::equal, r, r12_heapbase);
+ subq(r, r12_heapbase);
+ shrq(r, LogMinObjAlignmentInBytes);
+}
+
+void MacroAssembler::encode_heap_oop_not_null(Register r) {
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
+ if (CheckCompressedOops) {
+ Label ok;
+ testq(r, r);
+ jcc(Assembler::notEqual, ok);
+ STOP("null oop passed to encode_heap_oop_not_null");
+ bind(ok);
+ }
+#endif
+ verify_oop(r, "broken oop in encode_heap_oop_not_null");
+ if (Universe::narrow_oop_base() != NULL) {
+ subq(r, r12_heapbase);
+ }
+ if (Universe::narrow_oop_shift() != 0) {
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ shrq(r, LogMinObjAlignmentInBytes);
+ }
+}
+
+void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
+ if (CheckCompressedOops) {
+ Label ok;
+ testq(src, src);
+ jcc(Assembler::notEqual, ok);
+ STOP("null oop passed to encode_heap_oop_not_null2");
+ bind(ok);
+ }
+#endif
+ verify_oop(src, "broken oop in encode_heap_oop_not_null2");
+ if (dst != src) {
+ movq(dst, src);
+ }
+ if (Universe::narrow_oop_base() != NULL) {
+ subq(dst, r12_heapbase);
+ }
+ if (Universe::narrow_oop_shift() != 0) {
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ shrq(dst, LogMinObjAlignmentInBytes);
+ }
+}
+
+void MacroAssembler::decode_heap_oop(Register r) {
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
+#endif
+ if (Universe::narrow_oop_base() == NULL) {
+ if (Universe::narrow_oop_shift() != 0) {
+ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ shlq(r, LogMinObjAlignmentInBytes);
+ }
+ } else {
+ Label done;
+ shlq(r, LogMinObjAlignmentInBytes);
+ jccb(Assembler::equal, done);
+ addq(r, r12_heapbase);
+ bind(done);
+ }
+ verify_oop(r, "broken oop in decode_heap_oop");
+}
+
+void MacroAssembler::decode_heap_oop_not_null(Register r) {
+ // Note: it will change flags
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ if (Universe::narrow_oop_shift() != 0) {
+ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ shlq(r, LogMinObjAlignmentInBytes);
+ if (Universe::narrow_oop_base() != NULL) {
+ addq(r, r12_heapbase);
+ }
+ } else {
+ assert (Universe::narrow_oop_base() == NULL, "sanity");
+ }
+}
+
+void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
+ // Note: it will change flags
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ if (Universe::narrow_oop_shift() != 0) {
+ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+ if (LogMinObjAlignmentInBytes == Address::times_8) {
+ leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+ } else {
+ if (dst != src) {
+ movq(dst, src);
+ }
+ shlq(dst, LogMinObjAlignmentInBytes);
+ if (Universe::narrow_oop_base() != NULL) {
+ addq(dst, r12_heapbase);
+ }
+ }
+ } else {
+ assert (Universe::narrow_oop_base() == NULL, "sanity");
+ if (dst != src) {
+ movq(dst, src);
+ }
+ }
+}
+
+void MacroAssembler::encode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
+#endif
+ if (Universe::narrow_klass_base() != NULL) {
+ subq(r, r12_heapbase);
+ }
+ if (Universe::narrow_klass_shift() != 0) {
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ shrq(r, LogKlassAlignmentInBytes);
+ }
+}
+
+void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+#ifdef ASSERT
+ verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
+#endif
+ if (dst != src) {
+ movq(dst, src);
+ }
+ if (Universe::narrow_klass_base() != NULL) {
+ subq(dst, r12_heapbase);
+ }
+ if (Universe::narrow_klass_shift() != 0) {
+ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ shrq(dst, LogKlassAlignmentInBytes);
+ }
+}
+
+void MacroAssembler::decode_klass_not_null(Register r) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Note: it will change flags
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ if (Universe::narrow_klass_shift() != 0) {
+ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ shlq(r, LogKlassAlignmentInBytes);
+ if (Universe::narrow_klass_base() != NULL) {
+ addq(r, r12_heapbase);
+ }
+ } else {
+ assert (Universe::narrow_klass_base() == NULL, "sanity");
+ }
+}
+
+void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+ assert(Metaspace::is_initialized(), "metaspace should be initialized");
+ // Note: it will change flags
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ // Cannot assert, unverified entry point counts instructions (see .ad file)
+ // vtableStubs also counts instructions in pd_code_size_limit.
+ // Also do not verify_oop as this is called by verify_oop.
+ if (Universe::narrow_klass_shift() != 0) {
+ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+ assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+ leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+ } else {
+ assert (Universe::narrow_klass_base() == NULL, "sanity");
+ if (dst != src) {
+ movq(dst, src);
+ }
+ }
+}
+
+void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int oop_index = oop_recorder()->find_index(obj);
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+ mov_narrow_oop(dst, oop_index, rspec);
+}
+
+void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int oop_index = oop_recorder()->find_index(obj);
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+ mov_narrow_oop(dst, oop_index, rspec);
+}
+
+void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
+void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
+void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int oop_index = oop_recorder()->find_index(obj);
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+ Assembler::cmp_narrow_oop(dst, oop_index, rspec);
+}
+
+void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
+ assert (UseCompressedOops, "should only be used for compressed headers");
+ assert (Universe::heap() != NULL, "java heap should be initialized");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int oop_index = oop_recorder()->find_index(obj);
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+ Assembler::cmp_narrow_oop(dst, oop_index, rspec);
+}
+
+void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
+void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
+ assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+ assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
+ int klass_index = oop_recorder()->find_index(k);
+ RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+ Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+}
+
+void MacroAssembler::reinit_heapbase() {
+ if (UseCompressedOops || UseCompressedKlassPointers) {
+ movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+ }
+}
+#endif // _LP64
+
+
+// C2 compiled method's prolog code.
+void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
+
+ // WARNING: Initial instruction MUST be 5 bytes or longer so that
+ // NativeJump::patch_verified_entry will be able to patch out the entry
+ // code safely. The push to verify stack depth is ok at 5 bytes,
+ // the frame allocation can be either 3 or 6 bytes. So if we don't do
+ // stack bang then we must use the 6 byte frame allocation even if
+ // we have no frame. :-(
+
+ assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
+ // Remove word for return addr
+ framesize -= wordSize;
+
+ // Calls to C2R adapters often do not accept exceptional returns.
+ // We require that their callers must bang for them. But be careful, because
+ // some VM calls (such as call site linkage) can use several kilobytes of
+ // stack. But the stack safety zone should account for that.
+ // See bugs 4446381, 4468289, 4497237.
+ if (stack_bang) {
+ generate_stack_overflow_check(framesize);
+
+ // We always push rbp, so that on return to interpreter rbp, will be
+ // restored correctly and we can correct the stack.
+ push(rbp);
+ // Remove word for ebp
+ framesize -= wordSize;
+
+ // Create frame
+ if (framesize) {
+ subptr(rsp, framesize);
+ }
+ } else {
+ // Create frame (force generation of a 4 byte immediate value)
+ subptr_imm32(rsp, framesize);
+
+ // Save RBP register now.
+ framesize -= wordSize;
+ movptr(Address(rsp, framesize), rbp);
+ }
+
+ if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
+ framesize -= wordSize;
+ movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
+ }
+
+#ifndef _LP64
+ // If method sets FPU control word do it now
+ if (fp_mode_24b) {
+ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
+ }
+ if (UseSSE >= 2 && VerifyFPU) {
+ verify_FPU(0, "FPU stack must be clean on entry");
+ }
+#endif
+
+#ifdef ASSERT
+ if (VerifyStackAtCalls) {
+ Label L;
+ push(rax);
+ mov(rax, rsp);
+ andptr(rax, StackAlignmentInBytes-1);
+ cmpptr(rax, StackAlignmentInBytes-wordSize);
+ pop(rax);
+ jcc(Assembler::equal, L);
+ STOP("Stack is not properly aligned!");
+ bind(L);
+ }
+#endif
+
+}
+
+
+// IndexOf for constant substrings with size >= 8 chars
+// which don't need to be loaded through stack.
+void MacroAssembler::string_indexofC8(Register str1, Register str2,
+ Register cnt1, Register cnt2,
+ int int_cnt2, Register result,
+ XMMRegister vec, Register tmp) {
+ ShortBranchVerifier sbv(this);
+ assert(UseSSE42Intrinsics, "SSE4.2 is required");
+
+ // This method uses the pcmpestri instruction with bound registers
+ // inputs:
+ // xmm - substring
+ // rax - substring length (elements count)
+ // mem - scanned string
+ // rdx - string length (elements count)
+ // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
+ // outputs:
+ // rcx - matched index in string
+ assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
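+ // For reference: the imm8 value 0x0d used below decodes as
+ //   bits 1:0 = 01 -> unsigned 16-bit elements (Java chars)
+ //   bits 3:2 = 11 -> "equal ordered" aggregation, i.e. substring search
+ // so each pcmpestri scans a 16-byte window of the string for the start of
+ // the substring and leaves the first candidate index in rcx.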
+
+ Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
+ RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
+ MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
+
+ // Note, inline_string_indexOf() generates checks:
+ // if (substr.count > string.count) return -1;
+ // if (substr.count == 0) return 0;
+  assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
+
+ // Load substring.
+ movdqu(vec, Address(str2, 0));
+ movl(cnt2, int_cnt2);
+ movptr(result, str1); // string addr
+
+ if (int_cnt2 > 8) {
+ jmpb(SCAN_TO_SUBSTR);
+
+ // Reload substr for rescan, this code
+ // is executed only for large substrings (> 8 chars)
+ bind(RELOAD_SUBSTR);
+ movdqu(vec, Address(str2, 0));
+ negptr(cnt2); // Jumped here with negative cnt2, convert to positive
+
+ bind(RELOAD_STR);
+ // We came here after the beginning of the substring was
+ // matched but the rest of it was not, so we need to search
+ // again. Start from the next element after the previous match.
+
+ // cnt2 is the number of remaining substring elements and
+ // cnt1 is the number of remaining string elements when the cmp failed.
+ // Restored cnt1 = cnt1 - cnt2 + int_cnt2
+ subl(cnt1, cnt2);
+ addl(cnt1, int_cnt2);
+ movl(cnt2, int_cnt2); // Now restore cnt2
+
+ decrementl(cnt1); // Shift to next element
+ cmpl(cnt1, cnt2);
+  jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
+
+ addptr(result, 2);
+
+ } // (int_cnt2 > 8)
+
+ // Scan string for start of substr in 16-byte vectors
+ bind(SCAN_TO_SUBSTR);
+ pcmpestri(vec, Address(result, 0), 0x0d);
+ jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
+ subl(cnt1, 8);
+ jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
+ cmpl(cnt1, cnt2);
+  jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
+ addptr(result, 16);
+ jmpb(SCAN_TO_SUBSTR);
+
+ // Found a potential substr
+ bind(FOUND_CANDIDATE);
+ // Matched whole vector if first element matched (tmp(rcx) == 0).
+ if (int_cnt2 == 8) {
+ jccb(Assembler::overflow, RET_FOUND); // OF == 1
+ } else { // int_cnt2 > 8
+ jccb(Assembler::overflow, FOUND_SUBSTR);
+ }
+ // After pcmpestri tmp(rcx) contains matched element index
+ // Compute start addr of substr
+ lea(result, Address(result, tmp, Address::times_2));
+
+ // Make sure string is still long enough
+ subl(cnt1, tmp);
+ cmpl(cnt1, cnt2);
+ if (int_cnt2 == 8) {
+ jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
+ } else { // int_cnt2 > 8
+ jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
+ }
+ // Left less than substring.
+
+ bind(RET_NOT_FOUND);
+ movl(result, -1);
+ jmpb(EXIT);
+
+ if (int_cnt2 > 8) {
+ // This code is optimized for the case when whole substring
+ // is matched if its head is matched.
+ bind(MATCH_SUBSTR_HEAD);
+ pcmpestri(vec, Address(result, 0), 0x0d);
+ // Reload only the string if it does not match
+ jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
+
+ Label CONT_SCAN_SUBSTR;
+ // Compare the rest of substring (> 8 chars).
+ bind(FOUND_SUBSTR);
+ // First 8 chars are already matched.
+ negptr(cnt2);
+ addptr(cnt2, 8);
+
+ bind(SCAN_SUBSTR);
+ subl(cnt1, 8);
+ cmpl(cnt2, -8); // Do not read beyond substring
+ jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
+ // Back-up strings to avoid reading beyond substring:
+ // cnt1 = cnt1 - cnt2 + 8
+ addl(cnt1, cnt2); // cnt2 is negative
+ addl(cnt1, 8);
+ movl(cnt2, 8); negptr(cnt2);
+ bind(CONT_SCAN_SUBSTR);
+ if (int_cnt2 < (int)G) {
+ movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
+ pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
+ } else {
+ // calculate index in register to avoid integer overflow (int_cnt2*2)
+ movl(tmp, int_cnt2);
+ addptr(tmp, cnt2);
+ movdqu(vec, Address(str2, tmp, Address::times_2, 0));
+ pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
+ }
+ // Need to reload string pointers if the whole vector did not match
+ jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
+ addptr(cnt2, 8);
+ jcc(Assembler::negative, SCAN_SUBSTR);
+ // Fall through if found full substring
+
+ } // (int_cnt2 > 8)
+
+ bind(RET_FOUND);
+ // Found result if we matched full small substring.
+ // Compute substr offset
+ subptr(result, str1);
+ shrl(result, 1); // index
+ bind(EXIT);
+
+} // string_indexofC8
+
+// Small strings are loaded through stack if they cross page boundary.
+void MacroAssembler::string_indexof(Register str1, Register str2,
+ Register cnt1, Register cnt2,
+ int int_cnt2, Register result,
+ XMMRegister vec, Register tmp) {
+ ShortBranchVerifier sbv(this);
+ assert(UseSSE42Intrinsics, "SSE4.2 is required");
+ //
+ // int_cnt2 is length of small (< 8 chars) constant substring
+ // or (-1) for non constant substring in which case its length
+ // is in cnt2 register.
+ //
+ // Note, inline_string_indexOf() generates checks:
+ // if (substr.count > string.count) return -1;
+ // if (substr.count == 0) return 0;
+ //
+ assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
+
+ // This method uses the pcmpestri instruction with bound registers
+ // inputs:
+ // xmm - substring
+ // rax - substring length (elements count)
+ // mem - scanned string
+ // rdx - string length (elements count)
+ // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
+ // outputs:
+ // rcx - matched index in string
+ assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
+
+ Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
+ RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
+ FOUND_CANDIDATE;
+
+ { //========================================================
+ // We don't know where these strings are located
+ // and we can't read beyond them. Load them through stack.
+ Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
+
+ movptr(tmp, rsp); // save old SP
+
+ if (int_cnt2 > 0) { // small (< 8 chars) constant substring
+ if (int_cnt2 == 1) { // One char
+ load_unsigned_short(result, Address(str2, 0));
+ movdl(vec, result); // move 32 bits
+ } else if (int_cnt2 == 2) { // Two chars
+ movdl(vec, Address(str2, 0)); // move 32 bits
+ } else if (int_cnt2 == 4) { // Four chars
+ movq(vec, Address(str2, 0)); // move 64 bits
+ } else { // cnt2 = { 3, 5, 6, 7 }
+ // Array header size is 12 bytes in 32-bit VM
+ // + 6 bytes for 3 chars == 18 bytes,
+ // enough space to load vec and shift.
+ assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
+ movdqu(vec, Address(str2, (int_cnt2*2)-16));
+ psrldq(vec, 16-(int_cnt2*2));
+ }
+ } else { // not constant substring
+ cmpl(cnt2, 8);
+ jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
+
+ // We can read beyond the string if str+16 does not cross a page boundary
+ // since heaps are aligned and mapped by pages.
+ assert(os::vm_page_size() < (int)G, "default page should be small");
+ movl(result, str2); // We need only low 32 bits
+ andl(result, (os::vm_page_size()-1));
+ cmpl(result, (os::vm_page_size()-16));
+ jccb(Assembler::belowEqual, CHECK_STR);
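+ // For reference: (str2 & (page_size-1)) <= page_size-16 means the 16 bytes
+ // starting at str2 lie on a single page, so the later movdqu of the
+ // substring cannot fault by reading past the end of a short string.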
+
+ // Move small strings to stack to allow loading 16 bytes into vec.
+ subptr(rsp, 16);
+ int stk_offset = wordSize-2;
+ push(cnt2);
+
+ bind(COPY_SUBSTR);
+ load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
+ movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
+ decrement(cnt2);
+ jccb(Assembler::notZero, COPY_SUBSTR);
+
+ pop(cnt2);
+ movptr(str2, rsp); // New substring address
+ } // non constant
+
+ bind(CHECK_STR);
+ cmpl(cnt1, 8);
+ jccb(Assembler::aboveEqual, BIG_STRINGS);
+
+ // Check cross page boundary.
+ movl(result, str1); // We need only low 32 bits
+ andl(result, (os::vm_page_size()-1));
+ cmpl(result, (os::vm_page_size()-16));
+ jccb(Assembler::belowEqual, BIG_STRINGS);
+
+ subptr(rsp, 16);
+ int stk_offset = -2;
+ if (int_cnt2 < 0) { // not constant
+ push(cnt2);
+ stk_offset += wordSize;
+ }
+ movl(cnt2, cnt1);
+
+ bind(COPY_STR);
+ load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
+ movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
+ decrement(cnt2);
+ jccb(Assembler::notZero, COPY_STR);
+
+ if (int_cnt2 < 0) { // not constant
+ pop(cnt2);
+ }
+ movptr(str1, rsp); // New string address
+
+ bind(BIG_STRINGS);
+ // Load substring.
+ if (int_cnt2 < 0) { // -1
+ movdqu(vec, Address(str2, 0));
+ push(cnt2); // substr count
+ push(str2); // substr addr
+ push(str1); // string addr
+ } else {
+ // Small (< 8 chars) constant substrings are loaded already.
+ movl(cnt2, int_cnt2);
+ }
+ push(tmp); // original SP
+
+ } // Finished loading
+
+ //========================================================
+ // Start search
+ //
+
+ movptr(result, str1); // string addr
+
+ if (int_cnt2 < 0) { // Only for non constant substring
+ jmpb(SCAN_TO_SUBSTR);
+
+ // SP saved at sp+0
+ // String saved at sp+1*wordSize
+ // Substr saved at sp+2*wordSize
+ // Substr count saved at sp+3*wordSize
+
+ // Reload substr for rescan, this code
+ // is executed only for large substrings (> 8 chars)
+ bind(RELOAD_SUBSTR);
+ movptr(str2, Address(rsp, 2*wordSize));
+ movl(cnt2, Address(rsp, 3*wordSize));
+ movdqu(vec, Address(str2, 0));
+ // We came here after the beginning of the substring was
+ // matched but the rest of it was not, so we need to search
+ // again. Start from the next element after the previous match.
+ subptr(str1, result); // Restore counter
+ shrl(str1, 1);
+ addl(cnt1, str1);
+ decrementl(cnt1); // Shift to next element
+ cmpl(cnt1, cnt2);
+  jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
+
+ addptr(result, 2);
+ } // non constant
+
+ // Scan string for start of substr in 16-byte vectors
+ bind(SCAN_TO_SUBSTR);
+ assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
+ pcmpestri(vec, Address(result, 0), 0x0d);
+ jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
+ subl(cnt1, 8);
+ jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
+ cmpl(cnt1, cnt2);
+  jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
+ addptr(result, 16);
+
+ bind(ADJUST_STR);
+ cmpl(cnt1, 8); // Do not read beyond string
+ jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
+ // Back-up string to avoid reading beyond string.
+ lea(result, Address(result, cnt1, Address::times_2, -16));
+ movl(cnt1, 8);
+ jmpb(SCAN_TO_SUBSTR);
+
+ // Found a potential substr
+ bind(FOUND_CANDIDATE);
+ // After pcmpestri tmp(rcx) contains matched element index
+
+ // Make sure string is still long enough
+ subl(cnt1, tmp);
+ cmpl(cnt1, cnt2);
+ jccb(Assembler::greaterEqual, FOUND_SUBSTR);
+ // Left less than substring.
+
+ bind(RET_NOT_FOUND);
+ movl(result, -1);
+ jmpb(CLEANUP);
+
+ bind(FOUND_SUBSTR);
+ // Compute start addr of substr
+ lea(result, Address(result, tmp, Address::times_2));
+
+ if (int_cnt2 > 0) { // Constant substring
+ // Repeat search for small substring (< 8 chars)
+ // from new point without reloading substring.
+ // Have to check that we don't read beyond string.
+ cmpl(tmp, 8-int_cnt2);
+ jccb(Assembler::greater, ADJUST_STR);
+ // Fall through if matched whole substring.
+ } else { // non constant
+ assert(int_cnt2 == -1, "should be != 0");
+
+ addl(tmp, cnt2);
+ // Found result if we matched whole substring.
+ cmpl(tmp, 8);
+ jccb(Assembler::lessEqual, RET_FOUND);
+
+ // Repeat search for small substring (<= 8 chars)
+ // from new point 'str1' without reloading substring.
+ cmpl(cnt2, 8);
+ // Have to check that we don't read beyond string.
+ jccb(Assembler::lessEqual, ADJUST_STR);
+
+ Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
+ // Compare the rest of substring (> 8 chars).
+ movptr(str1, result);
+
+ cmpl(tmp, cnt2);
+ // First 8 chars are already matched.
+ jccb(Assembler::equal, CHECK_NEXT);
+
+ bind(SCAN_SUBSTR);
+ pcmpestri(vec, Address(str1, 0), 0x0d);
+ // Need to reload string pointers if the whole vector did not match
+ jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
+
+ bind(CHECK_NEXT);
+ subl(cnt2, 8);
+ jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
+ addptr(str1, 16);
+ addptr(str2, 16);
+ subl(cnt1, 8);
+ cmpl(cnt2, 8); // Do not read beyond substring
+ jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
+ // Back-up strings to avoid reading beyond substring.
+ lea(str2, Address(str2, cnt2, Address::times_2, -16));
+ lea(str1, Address(str1, cnt2, Address::times_2, -16));
+ subl(cnt1, cnt2);
+ movl(cnt2, 8);
+ addl(cnt1, 8);
+ bind(CONT_SCAN_SUBSTR);
+ movdqu(vec, Address(str2, 0));
+ jmpb(SCAN_SUBSTR);
+
+ bind(RET_FOUND_LONG);
+ movptr(str1, Address(rsp, wordSize));
+ } // non constant
+
+ bind(RET_FOUND);
+ // Compute substr offset
+ subptr(result, str1);
+ shrl(result, 1); // index
+
+ bind(CLEANUP);
+ pop(rsp); // restore SP
+
+} // string_indexof
+
+// Compare strings.
+void MacroAssembler::string_compare(Register str1, Register str2,
+ Register cnt1, Register cnt2, Register result,
+ XMMRegister vec1) {
+ ShortBranchVerifier sbv(this);
+ Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
+
+ // Compute the minimum of the string lengths and the
+ // difference of the string lengths (stack).
+ // Do the conditional move stuff
+ movl(result, cnt1);
+ subl(cnt1, cnt2);
+ push(cnt1);
+ cmov32(Assembler::lessEqual, cnt2, result);
+
+ // Is the minimum length zero?
+ testl(cnt2, cnt2);
+ jcc(Assembler::zero, LENGTH_DIFF_LABEL);
+
+ // Load first characters
+ load_unsigned_short(result, Address(str1, 0));
+ load_unsigned_short(cnt1, Address(str2, 0));
+
+ // Compare first characters
+ subl(result, cnt1);
+ jcc(Assembler::notZero, POP_LABEL);
+ decrementl(cnt2);
+ jcc(Assembler::zero, LENGTH_DIFF_LABEL);
+
+ {
+ // Check after comparing first character to see if strings are equivalent
+ Label LSkip2;
+ // Check if the strings start at same location
+ cmpptr(str1, str2);
+ jccb(Assembler::notEqual, LSkip2);
+
+ // Check if the length difference is zero (from stack)
+ cmpl(Address(rsp, 0), 0x0);
+ jcc(Assembler::equal, LENGTH_DIFF_LABEL);
+
+ // Strings might not be equivalent
+ bind(LSkip2);
+ }
+
+ Address::ScaleFactor scale = Address::times_2;
+ int stride = 8;
+
+ // Advance to next element
+ addptr(str1, 16/stride);
+ addptr(str2, 16/stride);
+
+ if (UseSSE42Intrinsics) {
+ Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
+ int pcmpmask = 0x19;
+ // Setup to compare 16-byte vectors
+ movl(result, cnt2);
+ andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
+ jccb(Assembler::zero, COMPARE_TAIL);
+
+ lea(str1, Address(str1, result, scale));
+ lea(str2, Address(str2, result, scale));
+ negptr(result);
+
+ // pcmpestri
+ // inputs:
+ // vec1 - substring
+ // rax - negative string length (elements count)
+ // mem - scanned string
+ // rdx - string length (elements count)
+ // pcmpmask - cmp mode: 11000 (string compare with negated result)
+ // + 00 (unsigned bytes) or + 01 (unsigned shorts)
+ // outputs:
+ // rcx - first mismatched element index
+ assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
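+ // For reference: pcmpmask 0x19 decodes as bits 1:0 = 01 (unsigned 16-bit
+ // elements), bits 3:2 = 10 ("equal each", element-wise compare) and
+ // bits 5:4 = 01 (negated result), so rcx receives the index of the first
+ // pair of characters that differ.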
+
+ bind(COMPARE_WIDE_VECTORS);
+ movdqu(vec1, Address(str1, result, scale));
+ pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
+ // After pcmpestri cnt1(rcx) contains mismatched element index
+
+ jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
+ addptr(result, stride);
+ subptr(cnt2, stride);
+ jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
+
+ // compare wide vectors tail
+ testl(result, result);
+ jccb(Assembler::zero, LENGTH_DIFF_LABEL);
+
+ movl(cnt2, stride);
+ movl(result, stride);
+ negptr(result);
+ movdqu(vec1, Address(str1, result, scale));
+ pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
+ jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
+
+ // Mismatched characters in the vectors
+ bind(VECTOR_NOT_EQUAL);
+ addptr(result, cnt1);
+ movptr(cnt2, result);
+ load_unsigned_short(result, Address(str1, cnt2, scale));
+ load_unsigned_short(cnt1, Address(str2, cnt2, scale));
+ subl(result, cnt1);
+ jmpb(POP_LABEL);
+
+ bind(COMPARE_TAIL); // limit is zero
+ movl(cnt2, result);
+ // Fallthru to tail compare
+ }
+
+ // Shift str2 and str1 to the end of the arrays, negate min
+ lea(str1, Address(str1, cnt2, scale, 0));
+ lea(str2, Address(str2, cnt2, scale, 0));
+ negptr(cnt2);
+
+ // Compare the rest of the elements
+ bind(WHILE_HEAD_LABEL);
+ load_unsigned_short(result, Address(str1, cnt2, scale, 0));
+ load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
+ subl(result, cnt1);
+ jccb(Assembler::notZero, POP_LABEL);
+ increment(cnt2);
+ jccb(Assembler::notZero, WHILE_HEAD_LABEL);
+
+ // Strings are equal up to min length. Return the length difference.
+ bind(LENGTH_DIFF_LABEL);
+ pop(result);
+ jmpb(DONE_LABEL);
+
+ // Discard the stored length difference
+ bind(POP_LABEL);
+ pop(cnt1);
+
+ // That's it
+ bind(DONE_LABEL);
+}
+
+// Compare char[] arrays aligned to 4 bytes or substrings.
+void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
+ Register limit, Register result, Register chr,
+ XMMRegister vec1, XMMRegister vec2) {
+ ShortBranchVerifier sbv(this);
+ Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
+
+ int length_offset = arrayOopDesc::length_offset_in_bytes();
+ int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
+
+ // Check the input args
+ cmpptr(ary1, ary2);
+ jcc(Assembler::equal, TRUE_LABEL);
+
+ if (is_array_equ) {
+ // Need additional checks for arrays_equals.
+ testptr(ary1, ary1);
+ jcc(Assembler::zero, FALSE_LABEL);
+ testptr(ary2, ary2);
+ jcc(Assembler::zero, FALSE_LABEL);
+
+ // Check the lengths
+ movl(limit, Address(ary1, length_offset));
+ cmpl(limit, Address(ary2, length_offset));
+ jcc(Assembler::notEqual, FALSE_LABEL);
+ }
+
+ // count == 0
+ testl(limit, limit);
+ jcc(Assembler::zero, TRUE_LABEL);
+
+ if (is_array_equ) {
+ // Load array address
+ lea(ary1, Address(ary1, base_offset));
+ lea(ary2, Address(ary2, base_offset));
+ }
+
+ shll(limit, 1); // byte count != 0
+ movl(result, limit); // copy
+
+ if (UseSSE42Intrinsics) {
+ // With SSE4.2, use double quad vector compare
+ Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
+
+ // Compare 16-byte vectors
+ andl(result, 0x0000000e); // tail count (in bytes)
+ andl(limit, 0xfffffff0); // vector count (in bytes)
+ jccb(Assembler::zero, COMPARE_TAIL);
+
+ lea(ary1, Address(ary1, limit, Address::times_1));
+ lea(ary2, Address(ary2, limit, Address::times_1));
+ negptr(limit);
+
+ bind(COMPARE_WIDE_VECTORS);
+ movdqu(vec1, Address(ary1, limit, Address::times_1));
+ movdqu(vec2, Address(ary2, limit, Address::times_1));
+ pxor(vec1, vec2);
+
+ ptest(vec1, vec1);
+ jccb(Assembler::notZero, FALSE_LABEL);
+ addptr(limit, 16);
+ jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
+
+ testl(result, result);
+ jccb(Assembler::zero, TRUE_LABEL);
+
+ movdqu(vec1, Address(ary1, result, Address::times_1, -16));
+ movdqu(vec2, Address(ary2, result, Address::times_1, -16));
+ pxor(vec1, vec2);
+
+ ptest(vec1, vec1);
+ jccb(Assembler::notZero, FALSE_LABEL);
+ jmpb(TRUE_LABEL);
+
+ bind(COMPARE_TAIL); // limit is zero
+ movl(limit, result);
+ // Fallthru to tail compare
+ }
+
+ // Compare 4-byte vectors
+ andl(limit, 0xfffffffc); // vector count (in bytes)
+ jccb(Assembler::zero, COMPARE_CHAR);
+
+ lea(ary1, Address(ary1, limit, Address::times_1));
+ lea(ary2, Address(ary2, limit, Address::times_1));
+ negptr(limit);
+
+ bind(COMPARE_VECTORS);
+ movl(chr, Address(ary1, limit, Address::times_1));
+ cmpl(chr, Address(ary2, limit, Address::times_1));
+ jccb(Assembler::notEqual, FALSE_LABEL);
+ addptr(limit, 4);
+ jcc(Assembler::notZero, COMPARE_VECTORS);
+
+ // Compare trailing char (final 2 bytes), if any
+ bind(COMPARE_CHAR);
+ testl(result, 0x2); // tail char
+ jccb(Assembler::zero, TRUE_LABEL);
+ load_unsigned_short(chr, Address(ary1, 0));
+ load_unsigned_short(limit, Address(ary2, 0));
+ cmpl(chr, limit);
+ jccb(Assembler::notEqual, FALSE_LABEL);
+
+ bind(TRUE_LABEL);
+ movl(result, 1); // return true
+ jmpb(DONE);
+
+ bind(FALSE_LABEL);
+ xorl(result, result); // return false
+
+ // That's it
+ bind(DONE);
+}
+
+void MacroAssembler::generate_fill(BasicType t, bool aligned,
+ Register to, Register value, Register count,
+ Register rtmp, XMMRegister xtmp) {
+ ShortBranchVerifier sbv(this);
+ assert_different_registers(to, value, count, rtmp);
+ Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
+ Label L_fill_2_bytes, L_fill_4_bytes;
+
+ int shift = -1;
+ switch (t) {
+ case T_BYTE:
+ shift = 2;
+ break;
+ case T_SHORT:
+ shift = 1;
+ break;
+ case T_INT:
+ shift = 0;
+ break;
+ default: ShouldNotReachHere();
+ }
+
+ if (t == T_BYTE) {
+ andl(value, 0xff);
+ movl(rtmp, value);
+ shll(rtmp, 8);
+ orl(value, rtmp);
+ }
+ if (t == T_SHORT) {
+ andl(value, 0xffff);
+ }
+ if (t == T_BYTE || t == T_SHORT) {
+ movl(rtmp, value);
+ shll(rtmp, 16);
+ orl(value, rtmp);
+ }
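+ // Worked example: for T_BYTE with value = 0xAB the code above first forms
+ // 0xABAB (value | value << 8) and then 0xABABABAB (value | value << 16),
+ // so a single 32-bit store fills four byte elements at once.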
+
+ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
+ jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
+ if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
+ // align source address at 4 bytes address boundary
+ if (t == T_BYTE) {
+ // One byte misalignment happens only for byte arrays
+ testptr(to, 1);
+ jccb(Assembler::zero, L_skip_align1);
+ movb(Address(to, 0), value);
+ increment(to);
+ decrement(count);
+ BIND(L_skip_align1);
+ }
+ // Two bytes misalignment happens only for byte and short (char) arrays
+ testptr(to, 2);
+ jccb(Assembler::zero, L_skip_align2);
+ movw(Address(to, 0), value);
+ addptr(to, 2);
+ subl(count, 1<<(shift-1));
+ BIND(L_skip_align2);
+ }
+ if (UseSSE < 2) {
+ Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
+ // Fill 32-byte chunks
+ subl(count, 8 << shift);
+ jcc(Assembler::less, L_check_fill_8_bytes);
+ align(16);
+
+ BIND(L_fill_32_bytes_loop);
+
+ for (int i = 0; i < 32; i += 4) {
+ movl(Address(to, i), value);
+ }
+
+ addptr(to, 32);
+ subl(count, 8 << shift);
+ jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
+ BIND(L_check_fill_8_bytes);
+ addl(count, 8 << shift);
+ jccb(Assembler::zero, L_exit);
+ jmpb(L_fill_8_bytes);
+
+ //
+ // length is too short, just fill qwords
+ //
+ BIND(L_fill_8_bytes_loop);
+ movl(Address(to, 0), value);
+ movl(Address(to, 4), value);
+ addptr(to, 8);
+ BIND(L_fill_8_bytes);
+ subl(count, 1 << (shift + 1));
+ jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
+ // fall through to fill 4 bytes
+ } else {
+ Label L_fill_32_bytes;
+ if (!UseUnalignedLoadStores) {
+ // align to 8 bytes, we know we are 4 byte aligned to start
+ testptr(to, 4);
+ jccb(Assembler::zero, L_fill_32_bytes);
+ movl(Address(to, 0), value);
+ addptr(to, 4);
+ subl(count, 1<<shift);
+ }
+ BIND(L_fill_32_bytes);
+ {
+ assert( UseSSE >= 2, "supported cpu only" );
+ Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
+ // Fill 32-byte chunks
+ movdl(xtmp, value);
+ pshufd(xtmp, xtmp, 0);
+
+ subl(count, 8 << shift);
+ jcc(Assembler::less, L_check_fill_8_bytes);
+ align(16);
+
+ BIND(L_fill_32_bytes_loop);
+
+ if (UseUnalignedLoadStores) {
+ movdqu(Address(to, 0), xtmp);
+ movdqu(Address(to, 16), xtmp);
+ } else {
+ movq(Address(to, 0), xtmp);
+ movq(Address(to, 8), xtmp);
+ movq(Address(to, 16), xtmp);
+ movq(Address(to, 24), xtmp);
+ }
+
+ addptr(to, 32);
+ subl(count, 8 << shift);
+ jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
+ BIND(L_check_fill_8_bytes);
+ addl(count, 8 << shift);
+ jccb(Assembler::zero, L_exit);
+ jmpb(L_fill_8_bytes);
+
+ //
+ // length is too short, just fill qwords
+ //
+ BIND(L_fill_8_bytes_loop);
+ movq(Address(to, 0), xtmp);
+ addptr(to, 8);
+ BIND(L_fill_8_bytes);
+ subl(count, 1 << (shift + 1));
+ jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
+ }
+ }
+ // fill trailing 4 bytes
+ BIND(L_fill_4_bytes);
+ testl(count, 1<<shift);
+ jccb(Assembler::zero, L_fill_2_bytes);
+ movl(Address(to, 0), value);
+ if (t == T_BYTE || t == T_SHORT) {
+ addptr(to, 4);
+ BIND(L_fill_2_bytes);
+ // fill trailing 2 bytes
+ testl(count, 1<<(shift-1));
+ jccb(Assembler::zero, L_fill_byte);
+ movw(Address(to, 0), value);
+ if (t == T_BYTE) {
+ addptr(to, 2);
+ BIND(L_fill_byte);
+ // fill trailing byte
+ testl(count, 1);
+ jccb(Assembler::zero, L_exit);
+ movb(Address(to, 0), value);
+ } else {
+ BIND(L_fill_byte);
+ }
+ } else {
+ BIND(L_fill_2_bytes);
+ }
+ BIND(L_exit);
+}
+#undef BIND
+#undef BLOCK_COMMENT
+
+
+Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
+ switch (cond) {
+ // Note some conditions are synonyms for others
+ case Assembler::zero: return Assembler::notZero;
+ case Assembler::notZero: return Assembler::zero;
+ case Assembler::less: return Assembler::greaterEqual;
+ case Assembler::lessEqual: return Assembler::greater;
+ case Assembler::greater: return Assembler::lessEqual;
+ case Assembler::greaterEqual: return Assembler::less;
+ case Assembler::below: return Assembler::aboveEqual;
+ case Assembler::belowEqual: return Assembler::above;
+ case Assembler::above: return Assembler::belowEqual;
+ case Assembler::aboveEqual: return Assembler::below;
+ case Assembler::overflow: return Assembler::noOverflow;
+ case Assembler::noOverflow: return Assembler::overflow;
+ case Assembler::negative: return Assembler::positive;
+ case Assembler::positive: return Assembler::negative;
+ case Assembler::parity: return Assembler::noParity;
+ case Assembler::noParity: return Assembler::parity;
+ }
+ ShouldNotReachHere(); return Assembler::overflow;
+}
+
+SkipIfEqual::SkipIfEqual(
+ MacroAssembler* masm, const bool* flag_addr, bool value) {
+ _masm = masm;
+ _masm->cmp8(ExternalAddress((address)flag_addr), value);
+ _masm->jcc(Assembler::equal, _label);
+}
+
+SkipIfEqual::~SkipIfEqual() {
+ _masm->bind(_label);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,1172 @@
+/*
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
+#define CPU_X86_VM_MACROASSEMBLER_X86_HPP
+
+#include "asm/assembler.hpp"
+
+
+// MacroAssembler extends Assembler by frequently used macros.
+//
+// Instructions for which a 'better' code sequence exists depending
+// on arguments should also go in here.
+
+class MacroAssembler: public Assembler {
+ friend class LIR_Assembler;
+ friend class Runtime1; // as_Address()
+
+ protected:
+
+ Address as_Address(AddressLiteral adr);
+ Address as_Address(ArrayAddress adr);
+
+ // Support for VM calls
+ //
+ // This is the base routine called by the different versions of call_VM_leaf. The interpreter
+ // may customize this version by overriding it for its purposes (e.g., to save/restore
+ // additional registers when doing a VM call).
+#ifdef CC_INTERP
+ // c++ interpreter never wants to use interp_masm version of call_VM
+ #define VIRTUAL
+#else
+ #define VIRTUAL virtual
+#endif
+
+ VIRTUAL void call_VM_leaf_base(
+ address entry_point, // the entry point
+ int number_of_arguments // the number of arguments to pop after the call
+ );
+
+ // This is the base routine called by the different versions of call_VM. The interpreter
+ // may customize this version by overriding it for its purposes (e.g., to save/restore
+ // additional registers when doing a VM call).
+ //
+ // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
+ // returns the register which contains the thread upon return. If a thread register has been
+ // specified, the return value will correspond to that register. If no last_java_sp is specified
+ // (noreg) then rsp will be used instead.
+ VIRTUAL void call_VM_base( // returns the register containing the thread upon return
+ Register oop_result, // where an oop-result ends up if any; use noreg otherwise
+ Register java_thread, // the thread if computed before ; use noreg otherwise
+ Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
+ address entry_point, // the entry point
+ int number_of_arguments, // the number of arguments (w/o thread) to pop after the call
+ bool check_exceptions // whether to check for pending exceptions after return
+ );
+
+ // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
+ // The implementation is only non-empty for the InterpreterMacroAssembler,
+ // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
+ void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
+
+ // helpers for FPU flag access
+ // tmp is a temporary register, if none is available use noreg
+ void save_rax (Register tmp);
+ void restore_rax(Register tmp);
+
+ public:
+ MacroAssembler(CodeBuffer* code) : Assembler(code) {}
+
+ // Support for NULL-checks
+ //
+ // Generates code that causes a NULL OS exception if the content of reg is NULL.
+ // If the accessed location is M[reg + offset] and the offset is known, provide the
+ // offset. No explicit code generation is needed if the offset is within a certain
+ // range (0 <= offset <= page_size).
+
+ void null_check(Register reg, int offset = -1);
+ static bool needs_explicit_null_check(intptr_t offset);
+
+ // Required platform-specific helpers for Label::patch_instructions.
+ // They _shadow_ the declarations in AbstractAssembler, which are undefined.
+ void pd_patch_instruction(address branch, address target) {
+ unsigned char op = branch[0];
+ assert(op == 0xE8 /* call */ ||
+ op == 0xE9 /* jmp */ ||
+ op == 0xEB /* short jmp */ ||
+ (op & 0xF0) == 0x70 /* short jcc */ ||
+ op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
+ "Invalid opcode at patch point");
+
+ if (op == 0xEB || (op & 0xF0) == 0x70) {
+ // short offset operators (jmp and jcc)
+ char* disp = (char*) &branch[1];
+ int imm8 = target - (address) &disp[1];
+ guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
+ *disp = imm8;
+ } else {
+ int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+ int imm32 = target - (address) &disp[1];
+ *disp = imm32;
+ }
+ }
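+ // For reference: in the short-branch case the displacement byte lives at
+ // branch[1] and is relative to the end of the 2-byte instruction, so
+ // imm8 = target - (branch + 2) == target - (address)&disp[1], which is the
+ // expression computed above; the 32-bit case is analogous, with the
+ // displacement following the 1- or 2-byte opcode.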
+
+#ifndef PRODUCT
+ static void pd_print_patched_instruction(address branch) {
+ const char* s;
+ unsigned char op = branch[0];
+ if (op == 0xE8) {
+ s = "call";
+ } else if (op == 0xE9 || op == 0xEB) {
+ s = "jmp";
+ } else if ((op & 0xF0) == 0x70) {
+ s = "jcc";
+ } else if (op == 0x0F) {
+ s = "jcc";
+ } else {
+ s = "????";
+ }
+ tty->print("%s (unresolved)", s);
+ }
+#endif
+
+ // The following 4 methods return the offset of the appropriate move instruction
+
+ // Support for fast byte/short loading with zero extension (depending on particular CPU)
+ int load_unsigned_byte(Register dst, Address src);
+ int load_unsigned_short(Register dst, Address src);
+
+ // Support for fast byte/short loading with sign extension (depending on particular CPU)
+ int load_signed_byte(Register dst, Address src);
+ int load_signed_short(Register dst, Address src);
+
+ // Support for sign-extension (hi:lo = extend_sign(lo))
+ void extend_sign(Register hi, Register lo);
+
+ // Load and store values by size and signed-ness
+ void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
+ void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
+
+ // Support for inc/dec with optimal instruction selection depending on value
+
+ void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
+ void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
+
+ void decrementl(Address dst, int value = 1);
+ void decrementl(Register reg, int value = 1);
+
+ void decrementq(Register reg, int value = 1);
+ void decrementq(Address dst, int value = 1);
+
+ void incrementl(Address dst, int value = 1);
+ void incrementl(Register reg, int value = 1);
+
+ void incrementq(Register reg, int value = 1);
+ void incrementq(Address dst, int value = 1);
+
+
+ // Support optimal SSE move instructions.
+ void movflt(XMMRegister dst, XMMRegister src) {
+ if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
+ else { movss (dst, src); return; }
+ }
+ void movflt(XMMRegister dst, Address src) { movss(dst, src); }
+ void movflt(XMMRegister dst, AddressLiteral src);
+ void movflt(Address dst, XMMRegister src) { movss(dst, src); }
+
+ void movdbl(XMMRegister dst, XMMRegister src) {
+ if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
+ else { movsd (dst, src); return; }
+ }
+
+ void movdbl(XMMRegister dst, AddressLiteral src);
+
+ void movdbl(XMMRegister dst, Address src) {
+ if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
+ else { movlpd(dst, src); return; }
+ }
+ void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
+
+ void incrementl(AddressLiteral dst);
+ void incrementl(ArrayAddress dst);
+
+ // Alignment
+ void align(int modulus);
+
+ // A 5 byte nop that is safe for patching (see patch_verified_entry)
+ void fat_nop();
+
+ // Stack frame creation/removal
+ void enter();
+ void leave();
+
+ // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
+ // The pointer will be loaded into the thread register.
+ void get_thread(Register thread);
+
+
+ // Support for VM calls
+ //
+ // It is imperative that all calls into the VM are handled via the call_VM macros.
+ // They make sure that the stack linkage is setup correctly. call_VM's correspond
+ // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
+
+
+ void call_VM(Register oop_result,
+ address entry_point,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1, Register arg_2,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1, Register arg_2, Register arg_3,
+ bool check_exceptions = true);
+
+ // Overloadings with last_Java_sp
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments = 0,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1, Register arg_2,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1, Register arg_2, Register arg_3,
+ bool check_exceptions = true);
+
+ void get_vm_result (Register oop_result, Register thread);
+ void get_vm_result_2(Register metadata_result, Register thread);
+
+ // These always tightly bind to MacroAssembler::call_VM_base
+ // bypassing the virtual implementation
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+ void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
+
+ void call_VM_leaf(address entry_point,
+ int number_of_arguments = 0);
+ void call_VM_leaf(address entry_point,
+ Register arg_1);
+ void call_VM_leaf(address entry_point,
+ Register arg_1, Register arg_2);
+ void call_VM_leaf(address entry_point,
+ Register arg_1, Register arg_2, Register arg_3);
+
+ // These always tightly bind to MacroAssembler::call_VM_leaf_base
+ // bypassing the virtual implementation
+ void super_call_VM_leaf(address entry_point);
+ void super_call_VM_leaf(address entry_point, Register arg_1);
+ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
+ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
+ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
+
+ // last Java Frame (fills frame anchor)
+ void set_last_Java_frame(Register thread,
+ Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc);
+
+ // thread in the default location (r15_thread on 64bit)
+ void set_last_Java_frame(Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc);
+
+ void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);
+
+ // thread in the default location (r15_thread on 64bit)
+ void reset_last_Java_frame(bool clear_fp, bool clear_pc);
+
+ // Stores
+ void store_check(Register obj); // store check for obj - register is destroyed afterwards
+ void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
+
+#ifndef SERIALGC
+
+ void g1_write_barrier_pre(Register obj,
+ Register pre_val,
+ Register thread,
+ Register tmp,
+ bool tosca_live,
+ bool expand_call);
+
+ void g1_write_barrier_post(Register store_addr,
+ Register new_val,
+ Register thread,
+ Register tmp,
+ Register tmp2);
+
+#endif // SERIALGC
+
+ // split store_check(Register obj) to enhance instruction interleaving
+ void store_check_part_1(Register obj);
+ void store_check_part_2(Register obj);
+
+ // C 'boolean' to Java boolean: x == 0 ? 0 : 1
+ void c2bool(Register x);
+
+ // C++ bool manipulation
+
+ void movbool(Register dst, Address src);
+ void movbool(Address dst, bool boolconst);
+ void movbool(Address dst, Register src);
+ void testbool(Register dst);
+
+ // oop manipulations
+ void load_klass(Register dst, Register src);
+ void store_klass(Register dst, Register src);
+
+ void load_heap_oop(Register dst, Address src);
+ void load_heap_oop_not_null(Register dst, Address src);
+ void store_heap_oop(Address dst, Register src);
+ void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);
+
+ // Used for storing NULL. All other oop constants should be
+ // stored using routines that take a jobject.
+ void store_heap_oop_null(Address dst);
+
+ void load_prototype_header(Register dst, Register src);
+
+#ifdef _LP64
+ void store_klass_gap(Register dst, Register src);
+
+ // This dummy is to prevent a call to store_heap_oop from
+ // converting a zero (like NULL) into a Register by giving
+ // the compiler two choices it can't resolve
+
+ void store_heap_oop(Address dst, void* dummy);
+
+ void encode_heap_oop(Register r);
+ void decode_heap_oop(Register r);
+ void encode_heap_oop_not_null(Register r);
+ void decode_heap_oop_not_null(Register r);
+ void encode_heap_oop_not_null(Register dst, Register src);
+ void decode_heap_oop_not_null(Register dst, Register src);
+
+ void set_narrow_oop(Register dst, jobject obj);
+ void set_narrow_oop(Address dst, jobject obj);
+ void cmp_narrow_oop(Register dst, jobject obj);
+ void cmp_narrow_oop(Address dst, jobject obj);
+
+ void encode_klass_not_null(Register r);
+ void decode_klass_not_null(Register r);
+ void encode_klass_not_null(Register dst, Register src);
+ void decode_klass_not_null(Register dst, Register src);
+ void set_narrow_klass(Register dst, Klass* k);
+ void set_narrow_klass(Address dst, Klass* k);
+ void cmp_narrow_klass(Register dst, Klass* k);
+ void cmp_narrow_klass(Address dst, Klass* k);
+
+ // if heap base register is used - reinit it with the correct value
+ void reinit_heapbase();
+
+ DEBUG_ONLY(void verify_heapbase(const char* msg);)
+
+#endif // _LP64
+
+ // Int division/remainder for Java
+ // (as idivl, but checks for special case as described in JVM spec.)
+ // returns idivl instruction offset for implicit exception handling
+ int corrected_idivl(Register reg);
+
+ // Long division/remainder for Java
+ // (as idivq, but checks for special case as described in JVM spec.)
+ // returns idivq instruction offset for implicit exception handling
+ int corrected_idivq(Register reg);
+
+ void int3();
+
+ // Long operation macros for a 32bit cpu
+ // Long negation for Java
+ void lneg(Register hi, Register lo);
+
+ // Long multiplication for Java
+ // (destroys contents of eax, ebx, ecx and edx)
+ void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
+
+ // Long shifts for Java
+ // (semantics as described in JVM spec.)
+ void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f)
+ void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
+
+ // Long compare for Java
+ // (semantics as described in JVM spec.)
+ void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
+
+
+ // misc
+
+ // Sign extension
+ void sign_extend_short(Register reg);
+ void sign_extend_byte(Register reg);
+
+ // Division by power of 2, rounding towards 0
+ void division_with_shift(Register reg, int shift_value);
+
+ // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
+ //
+ // CF (corresponds to C0) if x < y
+ // PF (corresponds to C2) if unordered
+ // ZF (corresponds to C3) if x = y
+ //
+ // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
+ // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
+ void fcmp(Register tmp);
+ // Variant of the above which allows y to be further down the stack
+ // and which only pops x and y if specified. If pop_right is
+ // specified then pop_left must also be specified.
+ void fcmp(Register tmp, int index, bool pop_left, bool pop_right);
+
+ // Floating-point comparison for Java
+ // Compares the top-most stack entries on the FPU stack and stores the result in dst.
+ // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
+ // (semantics as described in JVM spec.)
+ void fcmp2int(Register dst, bool unordered_is_less);
+ // Variant of the above which allows y to be further down the stack
+ // and which only pops x and y if specified. If pop_right is
+ // specified then pop_left must also be specified.
+ void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);
+
+ // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
+ // tmp is a temporary register, if none is available use noreg
+ void fremr(Register tmp);
+
+
+ // same as fcmp2int, but using SSE2
+ void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
+ void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
+
+ // Inlined sin/cos generator for Java; must not use CPU instruction
+ // directly on Intel as it does not have high enough precision
+ // outside of the range [-pi/4, pi/4]. Extra argument indicates the
+ // number of FPU stack slots in use; all but the topmost will
+ // require saving if a slow case is necessary. Assumes argument is
+ // on FP TOS; result is on FP TOS. No cpu registers are changed by
+ // this code.
+ void trigfunc(char trig, int num_fpu_regs_in_use = 1);
+
+ // branch to L if FPU flag C2 is set/not set
+ // tmp is a temporary register, if none is available use noreg
+ void jC2 (Register tmp, Label& L);
+ void jnC2(Register tmp, Label& L);
+
+ // Pop ST (ffree & fincstp combined)
+ void fpop();
+
+ // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
+ void push_fTOS();
+
+ // pops double TOS element from CPU stack and pushes on FPU stack
+ void pop_fTOS();
+
+ void empty_FPU_stack();
+
+ void push_IU_state();
+ void pop_IU_state();
+
+ void push_FPU_state();
+ void pop_FPU_state();
+
+ void push_CPU_state();
+ void pop_CPU_state();
+
+ // Round up to a multiple of modulus (which must be a power of two)
+ void round_to(Register reg, int modulus);
+
+ // Callee saved registers handling
+ void push_callee_saved_registers();
+ void pop_callee_saved_registers();
+
+ // allocation
+ void eden_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+ void tlab_allocate(
+ Register obj, // result: pointer to object after successful allocation
+ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
+ int con_size_in_bytes, // object size in bytes if known at compile time
+ Register t1, // temp register
+ Register t2, // temp register
+ Label& slow_case // continuation point if fast allocation fails
+ );
+ Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
+ void incr_allocated_bytes(Register thread,
+ Register var_size_in_bytes, int con_size_in_bytes,
+ Register t1 = noreg);
+
+ // interface method calling
+ void lookup_interface_method(Register recv_klass,
+ Register intf_klass,
+ RegisterOrConstant itable_index,
+ Register method_result,
+ Register scan_temp,
+ Label& no_such_interface);
+
+ // virtual method calling
+ void lookup_virtual_method(Register recv_klass,
+ RegisterOrConstant vtable_index,
+ Register method_result);
+
+ // Test sub_klass against super_klass, with fast and slow paths.
+
+ // The fast path produces a tri-state answer: yes / no / maybe-slow.
+ // One of the three labels can be NULL, meaning take the fall-through.
+ // If super_check_offset is -1, the value is loaded up from super_klass.
+ // No registers are killed, except temp_reg.
+ void check_klass_subtype_fast_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Label* L_success,
+ Label* L_failure,
+ Label* L_slow_path,
+ RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
+
+ // The rest of the type check; must be wired to a corresponding fast path.
+ // It does not repeat the fast path logic, so don't use it standalone.
+ // The temp_reg and temp2_reg can be noreg, if no temps are available.
+ // Updates the sub's secondary super cache as necessary.
+ // If set_cond_codes, condition codes will be Z on success, NZ on failure.
+ void check_klass_subtype_slow_path(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Register temp2_reg,
+ Label* L_success,
+ Label* L_failure,
+ bool set_cond_codes = false);
+
+ // Simplified, combined version, good for typical uses.
+ // Falls through on failure.
+ void check_klass_subtype(Register sub_klass,
+ Register super_klass,
+ Register temp_reg,
+ Label& L_success);
+
+ // method handles (JSR 292)
+ Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);
+
+ //----
+ void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
+
+ // Debugging
+
+ // only if +VerifyOops
+ // TODO: Make these macros with file and line like sparc version!
+ void verify_oop(Register reg, const char* s = "broken oop");
+ void verify_oop_addr(Address addr, const char * s = "broken oop addr");
+
+ // TODO: verify method and klass metadata (compare against vptr?)
+ void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
+ void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
+
+#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
+#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
+
+ // only if +VerifyFPU
+ void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
+
+ // prints msg, dumps registers and stops execution
+ void stop(const char* msg);
+
+ // prints msg and continues
+ void warn(const char* msg);
+
+ // dumps registers and other state
+ void print_state();
+
+ static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
+ static void debug64(char* msg, int64_t pc, int64_t regs[]);
+ static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
+ static void print_state64(int64_t pc, int64_t regs[]);
+
+ void os_breakpoint();
+
+ void untested() { stop("untested"); }
+
+ void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+
+ void should_not_reach_here() { stop("should not reach here"); }
+
+ void print_CPU_state();
+
+ // Stack overflow checking
+ void bang_stack_with_offset(int offset) {
+ // stack grows down, caller passes positive offset
+ assert(offset > 0, "must bang with negative offset");
+ movl(Address(rsp, (-offset)), rax);
+ }
+
+ // Writes to stack successive pages until offset reached to check for
+ // stack overflow + shadow pages. Also, clobbers tmp
+ void bang_stack_size(Register size, Register tmp);
+
+ virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
+ Register tmp,
+ int offset);
+
+ // Support for serializing memory accesses between threads
+ void serialize_memory(Register thread, Register tmp);
+
+ void verify_tlab();
+
+ // Biased locking support
+ // lock_reg and obj_reg must be loaded up with the appropriate values.
+ // swap_reg must be rax, and is killed.
+ // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
+ // be killed; if not supplied, push/pop will be used internally to
+ // allocate a temporary (inefficient, avoid if possible).
+ // Optional slow case is for implementations (interpreter and C1) which branch to
+ // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
+ // Returns offset of first potentially-faulting instruction for null
+ // check info (currently consumed only by C1). If
+ // swap_reg_contains_mark is true then returns -1 as it is assumed
+ // the calling code has already passed any potential faults.
+ int biased_locking_enter(Register lock_reg, Register obj_reg,
+ Register swap_reg, Register tmp_reg,
+ bool swap_reg_contains_mark,
+ Label& done, Label* slow_case = NULL,
+ BiasedLockingCounters* counters = NULL);
+ void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
+
+
+ Condition negate_condition(Condition cond);
+
+ // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
+ // operands. In general the names are modified to avoid hiding the instruction in Assembler
+ // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
+ // here in MacroAssembler. The major exception to this rule is call
+
+ // Arithmetics
+
+
+ void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
+ void addptr(Address dst, Register src);
+
+ void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
+ void addptr(Register dst, int32_t src);
+ void addptr(Register dst, Register src);
+ void addptr(Register dst, RegisterOrConstant src) {
+ if (src.is_constant()) addptr(dst, (int) src.as_constant());
+ else addptr(dst, src.as_register());
+ }
+
+ void andptr(Register dst, int32_t src);
+ void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
+
+ void cmp8(AddressLiteral src1, int imm);
+
+ // renamed to drag out the casting of address to int32_t/intptr_t
+ void cmp32(Register src1, int32_t imm);
+
+ void cmp32(AddressLiteral src1, int32_t imm);
+ // compare reg - mem, or reg - &mem
+ void cmp32(Register src1, AddressLiteral src2);
+
+ void cmp32(Register src1, Address src2);
+
+#ifndef _LP64
+ void cmpklass(Address dst, Metadata* obj);
+ void cmpklass(Register dst, Metadata* obj);
+ void cmpoop(Address dst, jobject obj);
+ void cmpoop(Register dst, jobject obj);
+#endif // _LP64
+
+ // NOTE src2 must be the lval. This is NOT an mem-mem compare
+ void cmpptr(Address src1, AddressLiteral src2);
+
+ void cmpptr(Register src1, AddressLiteral src2);
+
+ void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+ void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+ // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+
+ void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+ void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+
+ // cmp64 to avoid hiding cmpq
+ void cmp64(Register src1, AddressLiteral src);
+
+ void cmpxchgptr(Register reg, Address adr);
+
+ void locked_cmpxchgptr(Register reg, AddressLiteral adr);
+
+
+ void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+
+
+ void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
+
+ void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
+
+ void shlptr(Register dst, int32_t shift);
+ void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
+
+ void shrptr(Register dst, int32_t shift);
+ void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
+
+ void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
+ void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
+
+ void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
+
+ void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
+ void subptr(Register dst, int32_t src);
+ // Force generation of a 4-byte immediate value even if it fits into 8 bits
+ void subptr_imm32(Register dst, int32_t src);
+ void subptr(Register dst, Register src);
+ void subptr(Register dst, RegisterOrConstant src) {
+ if (src.is_constant()) subptr(dst, (int) src.as_constant());
+ else subptr(dst, src.as_register());
+ }
+
+ void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
+ void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
+
+ void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
+ void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
+
+ void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
+
+
+
+ // Helper functions for statistics gathering.
+ // Conditionally (and atomically, on MPs) increments the counter at the passed address, preserving condition codes.
+ void cond_inc32(Condition cond, AddressLiteral counter_addr);
+ // Unconditional atomic increment.
+ void atomic_incl(AddressLiteral counter_addr);
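+ // Usage sketch (illustrative; assumes a BiasedLockingCounters* 'counters'
+ // is in scope):
+ //   cond_inc32(Assembler::equal,
+ //              ExternalAddress((address) counters->biased_lock_entry_count_addr()));
+ //   atomic_incl(ExternalAddress((address) counters->slow_path_entry_count_addr()));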
+
+ void lea(Register dst, AddressLiteral adr);
+ void lea(Address dst, AddressLiteral adr);
+ void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
+
+ void leal32(Register dst, Address src) { leal(dst, src); }
+
+ // Import other testl() methods from the parent class or else
+ // they will be hidden by the following overriding declaration.
+ using Assembler::testl;
+ void testl(Register dst, AddressLiteral src);
+
+ void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+ void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+ void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+
+ void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
+ void testptr(Register src1, Register src2);
+
+ void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
+ void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
+
+ // Calls
+
+ void call(Label& L, relocInfo::relocType rtype);
+ void call(Register entry);
+
+ // NOTE: this call transfers to the effective address of entry, NOT
+ // to the address contained by entry, because that is more natural
+ // for jumps/calls.
+ void call(AddressLiteral entry);
+
+ // Emit the CompiledIC call idiom
+ void ic_call(address entry);
+
+ // Jumps
+
+ // NOTE: these jumps transfer to the effective address of dst, NOT
+ // to the address contained by dst, because that is more natural
+ // for jumps/calls.
+ void jump(AddressLiteral dst);
+ void jump_cc(Condition cc, AddressLiteral dst);
+
+ // 32-bit can do a case-table jump in one instruction, but we no longer allow the base
+ // to be installed in the Address class. This jump transfers to the address
+ // contained in the location described by entry (not to the address of entry itself).
+ void jump(ArrayAddress entry);
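+ // Usage sketch (illustrative):
+ //   jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ // transfers control to the stub entry itself, whereas jump(ArrayAddress)
+ // loads the target out of the table slot that the ArrayAddress describes
+ // (32-bit only).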
+
+ // Floating
+
+ void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
+ void andpd(XMMRegister dst, AddressLiteral src);
+
+ void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
+ void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
+ void andps(XMMRegister dst, AddressLiteral src);
+
+ void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
+ void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
+ void comiss(XMMRegister dst, AddressLiteral src);
+
+ void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
+ void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
+ void comisd(XMMRegister dst, AddressLiteral src);
+
+ void fadd_s(Address src) { Assembler::fadd_s(src); }
+ void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
+
+ void fldcw(Address src) { Assembler::fldcw(src); }
+ void fldcw(AddressLiteral src);
+
+ void fld_s(int index) { Assembler::fld_s(index); }
+ void fld_s(Address src) { Assembler::fld_s(src); }
+ void fld_s(AddressLiteral src);
+
+ void fld_d(Address src) { Assembler::fld_d(src); }
+ void fld_d(AddressLiteral src);
+
+ void fld_x(Address src) { Assembler::fld_x(src); }
+ void fld_x(AddressLiteral src);
+
+ void fmul_s(Address src) { Assembler::fmul_s(src); }
+ void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
+
+ void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
+ void ldmxcsr(AddressLiteral src);
+
+ // Compute pow(x,y) and exp(x) with x86 instructions. These do not cover
+ // all corner cases and may produce NaN, requiring a fallback to a
+ // runtime call.
+ void fast_pow();
+ void fast_exp();
+ void increase_precision();
+ void restore_precision();
+
+ // computes exp(x). Fallback to runtime call included.
+ void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }
+ // computes pow(x,y). Fallback to runtime call included.
+ void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); }
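+ // Usage sketch (illustrative; assumes the caller has already placed the
+ // argument(s) on the FPU stack in the layout the implementation expects):
+ //   exp_with_fallback(0);   // 0 == no other live FPU registers to preserve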
+
+private:
+
+ // call runtime as a fallback for trig functions and pow/exp.
+ void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);
+
+ // computes 2^(Ylog2X); Ylog2X in ST(0)
+ void pow_exp_core_encoding();
+
+ // computes pow(x,y) or exp(x). Fallback to runtime call included.
+ void pow_or_exp(bool is_exp, int num_fpu_regs_in_use);
+
+ // these are private because users should be doing movflt/movdbl
+
+ void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
+ void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
+ void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
+ void movss(XMMRegister dst, AddressLiteral src);
+
+ void movlpd(XMMRegister dst, Address src) { Assembler::movlpd(dst, src); }
+ void movlpd(XMMRegister dst, AddressLiteral src);
+
+public:
+
+ void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
+ void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
+ void addsd(XMMRegister dst, AddressLiteral src);
+
+ void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
+ void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
+ void addss(XMMRegister dst, AddressLiteral src);
+
+ void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
+ void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
+ void divsd(XMMRegister dst, AddressLiteral src);
+
+ void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
+ void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
+ void divss(XMMRegister dst, AddressLiteral src);
+
+ // Move Unaligned Double Quadword
+ void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
+ void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
+ void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
+ void movdqu(XMMRegister dst, AddressLiteral src);
+
+ void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
+ void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
+ void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
+ void movsd(XMMRegister dst, AddressLiteral src);
+
+ void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
+ void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
+ void mulsd(XMMRegister dst, AddressLiteral src);
+
+ void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
+ void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
+ void mulss(XMMRegister dst, AddressLiteral src);
+
+ void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
+ void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
+ void sqrtsd(XMMRegister dst, AddressLiteral src);
+
+ void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
+ void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
+ void sqrtss(XMMRegister dst, AddressLiteral src);
+
+ void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
+ void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
+ void subsd(XMMRegister dst, AddressLiteral src);
+
+ void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
+ void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
+ void subss(XMMRegister dst, AddressLiteral src);
+
+ void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
+ void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
+ void ucomiss(XMMRegister dst, AddressLiteral src);
+
+ void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
+ void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
+ void ucomisd(XMMRegister dst, AddressLiteral src);
+
+ // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
+ void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
+ void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
+ void xorpd(XMMRegister dst, AddressLiteral src);
+
+ // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
+ void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
+ void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
+ void xorps(XMMRegister dst, AddressLiteral src);
+
+ // Shuffle Bytes
+ void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
+ void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
+ void pshufb(XMMRegister dst, AddressLiteral src);
+ // AVX 3-operands instructions
+
+ void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
+ void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
+ void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
+ void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
+ void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
+ void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); }
+ void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
+
+ void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
+ void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); }
+ void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
+
+ void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
+ void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
+ void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
+ void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
+ void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
+ void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
+ void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
+ void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
+ void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
+ void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
+ void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
+ void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
+ void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+
+ // AVX Vector instructions
+
+ void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
+ void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
+ void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
+
+ void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
+ void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
+ void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256);
+
+ void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
+ if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
+ Assembler::vpxor(dst, nds, src, vector256);
+ else
+ Assembler::vxorpd(dst, nds, src, vector256);
+ }
+ void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
+ if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
+ Assembler::vpxor(dst, nds, src, vector256);
+ else
+ Assembler::vxorpd(dst, nds, src, vector256);
+ }
+
+ // Move packed integer values from the low 128 bits to the high 128 bits of a 256-bit vector.
+ void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
+ if (UseAVX > 1) // vinserti128h is available only in AVX2
+ Assembler::vinserti128h(dst, nds, src);
+ else
+ Assembler::vinsertf128h(dst, nds, src);
+ }
+
+ // Data
+
+ void cmov32( Condition cc, Register dst, Address src);
+ void cmov32( Condition cc, Register dst, Register src);
+
+ void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }
+
+ void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
+ void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
+
+ void movoop(Register dst, jobject obj);
+ void movoop(Address dst, jobject obj);
+
+ void mov_metadata(Register dst, Metadata* obj);
+ void mov_metadata(Address dst, Metadata* obj);
+
+ void movptr(ArrayAddress dst, Register src);
+ // can this do an lea?
+ void movptr(Register dst, ArrayAddress src);
+
+ void movptr(Register dst, Address src);
+
+ void movptr(Register dst, AddressLiteral src);
+
+ void movptr(Register dst, intptr_t src);
+ void movptr(Register dst, Register src);
+ void movptr(Address dst, intptr_t src);
+
+ void movptr(Address dst, Register src);
+
+ void movptr(Register dst, RegisterOrConstant src) {
+ if (src.is_constant()) movptr(dst, src.as_constant());
+ else movptr(dst, src.as_register());
+ }
+
+#ifdef _LP64
+ // Generally the next two are only used for moving NULL, although there are
+ // situations in initializing the mark word where they could be used.
+ // They are dangerous.
+
+ // They only exist on LP64, where int32_t and intptr_t are distinct types;
+ // on 32-bit the declarations would be ambiguous.
+
+ void movptr(Address dst, int32_t imm32);
+ void movptr(Register dst, int32_t imm32);
+#endif // _LP64
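+ // The typical LP64 use is clearing a slot, e.g. (illustrative; 'offset' is a
+ // placeholder):
+ //   movptr(Address(rsp, offset), (int32_t) NULL_WORD);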
+
+ // to avoid hiding movl
+ void mov32(AddressLiteral dst, Register src);
+ void mov32(Register dst, AddressLiteral src);
+
+ // to avoid hiding movb
+ void movbyte(ArrayAddress dst, int src);
+
+ // Import other mov() methods from the parent class or else
+ // they will be hidden by the following overriding declaration.
+ using Assembler::movdl;
+ using Assembler::movq;
+ void movdl(XMMRegister dst, AddressLiteral src);
+ void movq(XMMRegister dst, AddressLiteral src);
+
+ // Can push value or effective address
+ void pushptr(AddressLiteral src);
+
+ void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
+ void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
+
+ void pushoop(jobject obj);
+ void pushklass(Metadata* obj);
+
+ // sign-extend an l (32-bit) value to a ptr-sized element as needed
+ void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
+ void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
+
+ // C2 compiled method's prolog code.
+ void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
+
+ // IndexOf strings.
+ // Small strings are loaded through stack if they cross page boundary.
+ void string_indexof(Register str1, Register str2,
+ Register cnt1, Register cnt2,
+ int int_cnt2, Register result,
+ XMMRegister vec, Register tmp);
+
+ // IndexOf for constant substrings with size >= 8 elements
+ // which don't need to be loaded through stack.
+ void string_indexofC8(Register str1, Register str2,
+ Register cnt1, Register cnt2,
+ int int_cnt2, Register result,
+ XMMRegister vec, Register tmp);
+
+ // Smallest code: we don't need to load through stack,
+ // check string tail.
+
+ // Compare strings.
+ void string_compare(Register str1, Register str2,
+ Register cnt1, Register cnt2, Register result,
+ XMMRegister vec1);
+
+ // Compare char[] arrays.
+ void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
+ Register limit, Register result, Register chr,
+ XMMRegister vec1, XMMRegister vec2);
+
+ // Fill primitive arrays
+ void generate_fill(BasicType t, bool aligned,
+ Register to, Register value, Register count,
+ Register rtmp, XMMRegister xtmp);
+
+#undef VIRTUAL
+
+};
+
+/**
+ * class SkipIfEqual:
+ *
+ * Instantiating this class emits assembly code that jumps around any code
+ * emitted between the creation of the instance and its automatic destruction
+ * at the end of the scope block, depending on the run-time value of the flag
+ * passed to the constructor.
+ */
+class SkipIfEqual {
+ private:
+ MacroAssembler* _masm;
+ Label _label;
+
+ public:
+ SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
+ ~SkipIfEqual();
+};
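+// Usage sketch (illustrative; any develop/product bool flag whose address is
+// known at code-generation time can be used):
+//   {
+//     SkipIfEqual skip(_masm, &DTraceMethodProbes, false);
+//     // code emitted here runs only when DTraceMethodProbes != false
+//   }  // the destructor binds the label that the conditional jump targets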
+
+#endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/metaspaceShared_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/metaspaceShared_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
--- a/hotspot/src/cpu/x86/vm/metaspaceShared_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/metaspaceShared_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
--- a/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
--- a/hotspot/src/cpu/x86/vm/relocInfo_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/relocInfo_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,12 +24,11 @@
#include "precompiled.hpp"
#ifdef COMPILER2
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
-#include "nativeInst_x86.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/cpu/x86/vm/runtime_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,12 +24,11 @@
#include "precompiled.hpp"
#ifdef COMPILER2
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
-#include "nativeInst_x86.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@@ -37,19 +37,8 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@@ -37,19 +37,8 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_32.hpp"
#include "memory/resourceArea.hpp"
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_64.hpp"
#include "memory/resourceArea.hpp"
--- a/hotspot/src/cpu/zero/vm/assembler_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/assembler_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -46,6 +46,12 @@
return 0;
}
+#ifdef ASSERT
+bool AbstractAssembler::pd_check_instruction_mark() {
+ ShouldNotCallThis();
+}
+#endif
+
void Assembler::pd_patch_instruction(address branch, address target) {
ShouldNotCallThis();
}
@@ -80,6 +86,11 @@
emit_address((address) obj);
}
+void MacroAssembler::store_Metadata(Metadata* md) {
+ code_section()->relocate(pc(), metadata_Relocation::spec_for_immediate());
+ emit_address((address) md);
+}
+
static void should_not_call() {
report_should_not_call(__FILE__, __LINE__);
}
--- a/hotspot/src/cpu/zero/vm/assembler_zero.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/assembler_zero.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -55,14 +55,9 @@
public:
void advance(int bytes);
void store_oop(jobject obj);
+ void store_Metadata(Metadata* obj);
};
-#ifdef ASSERT
-inline bool AbstractAssembler::pd_check_instruction_mark() {
- ShouldNotCallThis();
-}
-#endif
-
address ShouldNotCallThisStub();
address ShouldNotCallThisEntry();
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1015,11 +1015,7 @@
// Helper for figuring out if frames are interpreter frames
bool CppInterpreter::contains(address pc) {
-#ifdef PRODUCT
- ShouldNotCallThis();
-#else
return false; // make frame::print_value_on work
-#endif // !PRODUCT
}
// Result handlers and convertors
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -52,11 +52,7 @@
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
-#ifdef _ALLBSD_SOURCE
define_pd_global(bool, UseMembar, true);
-#else
-define_pd_global(bool, UseMembar, false);
-#endif
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
--- a/hotspot/src/cpu/zero/vm/interp_masm_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/interp_masm_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -37,11 +37,6 @@
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// This file is intentionally empty
--- a/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -38,14 +38,9 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "stack_zero.inline.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
--- a/hotspot/src/cpu/zero/vm/stubRoutines_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/cpu/zero/vm/stubRoutines_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,9 +27,4 @@
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
--- a/hotspot/src/os/bsd/vm/mutex_bsd.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/mutex_bsd.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,7 @@
#include "mutex_bsd.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
-#include "thread_bsd.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
// put OS-includes here
--- a/hotspot/src/os/bsd/vm/mutex_bsd.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/mutex_bsd.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,7 @@
#include "os_bsd.inline.hpp"
#include "runtime/interfaceSupport.hpp"
-#include "thread_bsd.inline.hpp"
+#include "runtime/thread.inline.hpp"
// Reconciliation History
--- a/hotspot/src/os/bsd/vm/osThread_bsd.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/osThread_bsd.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,29 +23,10 @@
*/
// no precompiled headers
-#include "runtime/atomic.hpp"
-#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-#endif
+#include <signal.h>
void OSThread::pd_initialize() {
assert(this != NULL, "check");
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
+#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
@@ -52,36 +53,16 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
-#include "thread_bsd.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-# include "nativeInst_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-# include "nativeInst_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-# include "nativeInst_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-# include "nativeInst_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-# include "nativeInst_ppc.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os/bsd/vm/os_bsd.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/os_bsd.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,13 +26,13 @@
#define OS_BSD_VM_OS_BSD_INLINE_HPP
#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
+
#ifdef TARGET_OS_ARCH_bsd_x86
-# include "atomic_bsd_x86.inline.hpp"
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
-# include "atomic_bsd_zero.inline.hpp"
# include "orderAccess_bsd_zero.inline.hpp"
#endif
--- a/hotspot/src/os/bsd/vm/threadCritical_bsd.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/threadCritical_bsd.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
-#include "thread_bsd.inline.hpp"
// put OS-includes here
# include <pthread.h>
--- a/hotspot/src/os/bsd/vm/thread_bsd.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/bsd/vm/thread_bsd.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,10 @@
#ifndef OS_BSD_VM_THREAD_BSD_INLINE_HPP
#define OS_BSD_VM_THREAD_BSD_INLINE_HPP
+#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+#error "This file should only be included from thread.inline.hpp"
+#endif
+
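+// Note: this guard relies on runtime/thread.inline.hpp defining
+// SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE around its platform include,
+// roughly (sketch; see that file for the authoritative version):
+//   #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+//   #ifdef TARGET_OS_FAMILY_bsd
+//   # include "thread_bsd.inline.hpp"
+//   #endif
+//   #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE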
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
--- a/hotspot/src/os/linux/vm/mutex_linux.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/mutex_linux.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,7 @@
#include "mutex_linux.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
-#include "thread_linux.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
// put OS-includes here
--- a/hotspot/src/os/linux/vm/mutex_linux.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/mutex_linux.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,7 @@
#include "os_linux.inline.hpp"
#include "runtime/interfaceSupport.hpp"
-#include "thread_linux.inline.hpp"
+#include "runtime/thread.inline.hpp"
// Reconciliation History
--- a/hotspot/src/os/linux/vm/osThread_linux.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/osThread_linux.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,29 +23,10 @@
*/
// no precompiled headers
-#include "runtime/atomic.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
+#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-#endif
+#include <signal.h>
void OSThread::pd_initialize() {
assert(this != NULL, "check");
--- a/hotspot/src/os/linux/vm/os_linux.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
+#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
@@ -52,36 +53,16 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
-#include "thread_linux.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-# include "nativeInst_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-# include "nativeInst_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-# include "nativeInst_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-# include "nativeInst_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-# include "nativeInst_ppc.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os/linux/vm/os_linux.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/os_linux.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,25 +26,22 @@
#define OS_LINUX_VM_OS_LINUX_INLINE_HPP
#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
+
#ifdef TARGET_OS_ARCH_linux_x86
-# include "atomic_linux_x86.inline.hpp"
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
-# include "atomic_linux_sparc.inline.hpp"
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
-# include "atomic_linux_zero.inline.hpp"
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
-# include "atomic_linux_arm.inline.hpp"
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
-# include "atomic_linux_ppc.inline.hpp"
# include "orderAccess_linux_ppc.inline.hpp"
#endif
--- a/hotspot/src/os/linux/vm/threadCritical_linux.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/threadCritical_linux.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
-#include "thread_linux.inline.hpp"
// put OS-includes here
# include <pthread.h>
--- a/hotspot/src/os/linux/vm/thread_linux.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/linux/vm/thread_linux.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,10 @@
#ifndef OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
#define OS_LINUX_VM_THREAD_LINUX_INLINE_HPP
+#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+#error "This file should only be included from thread.inline.hpp"
+#endif
+
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
--- a/hotspot/src/os/solaris/vm/mutex_solaris.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/mutex_solaris.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,7 @@
#include "mutex_solaris.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
-#include "thread_solaris.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
// Solaris-specific include, therefore not in includeDB_*
--- a/hotspot/src/os/solaris/vm/mutex_solaris.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/mutex_solaris.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,6 +27,6 @@
#include "os_solaris.inline.hpp"
#include "runtime/interfaceSupport.hpp"
-#include "thread_solaris.inline.hpp"
+#include "runtime/thread.inline.hpp"
#endif // OS_SOLARIS_VM_MUTEX_SOLARIS_INLINE_HPP
--- a/hotspot/src/os/solaris/vm/osThread_solaris.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -30,14 +30,8 @@
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-# include <signal.h>
+#include <signal.h>
// ***************************************************************
// Platform dependent initialization and cleanup
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
+#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
@@ -52,25 +53,17 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
-#include "thread_solaris.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-# include "nativeInst_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-# include "nativeInst_sparc.hpp"
-#endif
// put OS-includes here
# include <dlfcn.h>
--- a/hotspot/src/os/solaris/vm/os_solaris.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/os_solaris.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,13 +26,13 @@
#define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
+
#ifdef TARGET_OS_ARCH_solaris_x86
-# include "atomic_solaris_x86.inline.hpp"
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "atomic_solaris_sparc.inline.hpp"
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
--- a/hotspot/src/os/solaris/vm/threadCritical_solaris.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/threadCritical_solaris.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
-#include "thread_solaris.inline.hpp"
// OS-includes here
#include <thread.h>
--- a/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,10 @@
#ifndef OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
#define OS_SOLARIS_VM_THREAD_SOLARIS_INLINE_HPP
+#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+#error "This file should only be included from thread.inline.hpp"
+#endif
+
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
--- a/hotspot/src/os/windows/vm/mutex_windows.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/mutex_windows.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,7 @@
#include "mutex_windows.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
-#include "thread_windows.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
// put OS-includes here
--- a/hotspot/src/os/windows/vm/mutex_windows.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/mutex_windows.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,6 +27,6 @@
#include "os_windows.inline.hpp"
#include "runtime/interfaceSupport.hpp"
-#include "thread_windows.inline.hpp"
+#include "runtime/thread.inline.hpp"
#endif // OS_WINDOWS_VM_MUTEX_WINDOWS_INLINE_HPP
--- a/hotspot/src/os/windows/vm/osThread_windows.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/osThread_windows.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -30,9 +30,6 @@
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
void OSThread::pd_initialize() {
set_thread_handle(NULL);
--- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -32,6 +32,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
+#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
@@ -55,20 +56,16 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
-#include "thread_windows.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-# include "nativeInst_x86.hpp"
-#endif
#ifdef _DEBUG
#include <crtdbg.h>
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,9 +26,10 @@
#define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
+
#ifdef TARGET_OS_ARCH_windows_x86
-# include "atomic_windows_x86.inline.hpp"
# include "orderAccess_windows_x86.inline.hpp"
#endif
--- a/hotspot/src/os/windows/vm/threadCritical_windows.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/threadCritical_windows.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
-#include "thread_windows.inline.hpp"
// OS-includes here
# include <windows.h>
--- a/hotspot/src/os/windows/vm/thread_windows.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os/windows/vm/thread_windows.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,10 @@
#ifndef OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
#define OS_WINDOWS_VM_THREAD_WINDOWS_INLINE_HPP
+#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+#error "This file should only be included from thread.inline.hpp"
+#endif
+
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
--- a/hotspot/src/os_cpu/bsd_x86/vm/assembler_bsd_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_x86/vm/assembler_bsd_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
// no precompiled headers
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -33,7 +33,6 @@
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
-#include "nativeInst_x86.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
@@ -48,8 +47,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_bsd.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_x86/vm/threadLS_bsd_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_bsd.inline.hpp"
// Map stack pointer (%esp) to thread pointer for faster TLS access
//
--- a/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_bsd.inline.hpp"
+#include "runtime/thread.inline.hpp"
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -54,8 +54,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_bsd.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_zero/vm/threadLS_bsd_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,8 +24,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_bsd.inline.hpp"
void ThreadLocalStorage::generate_code_for_get_thread() {
// nothing to do
--- a/hotspot/src/os_cpu/bsd_zero/vm/thread_bsd_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/bsd_zero/vm/thread_bsd_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_bsd.inline.hpp"
+#include "runtime/thread.inline.hpp"
void JavaThread::cache_global_variables() {
// nothing to do
--- a/hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
// no precompiled headers
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -48,8 +48,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_linux.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_sparc/vm/threadLS_linux_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_linux.inline.hpp"
void ThreadLocalStorage::generate_code_for_get_thread() {
}
--- a/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_sparc/vm/thread_linux_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_linux.inline.hpp"
+#include "runtime/thread.inline.hpp"
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
--- a/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
// no precompiled headers
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -33,7 +33,6 @@
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
-#include "nativeInst_x86.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
@@ -48,8 +47,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_linux.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_x86/vm/threadLS_linux_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_linux.inline.hpp"
// Map stack pointer (%esp) to thread pointer for faster TLS access
//
--- a/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_linux.inline.hpp"
+#include "runtime/thread.inline.hpp"
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -49,8 +49,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_linux.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,8 +24,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_linux.inline.hpp"
void ThreadLocalStorage::generate_code_for_get_thread() {
// nothing to do
--- a/hotspot/src/os_cpu/linux_zero/vm/thread_linux_zero.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/linux_zero/vm/thread_linux_zero.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_linux.inline.hpp"
+#include "runtime/thread.inline.hpp"
void JavaThread::cache_global_variables() {
// nothing to do
--- a/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
// no precompiled headers
-#include "assembler_sparc.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -48,8 +48,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_solaris.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/threadLS_solaris_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_solaris.inline.hpp"
// Provides an entry point we can link against and
// a buffer we can emit code into. The buffer is
--- a/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_solaris.inline.hpp"
+#include "runtime/thread.inline.hpp"
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
--- a/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_x86/vm/orderAccess_solaris_x86.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,6 +27,7 @@
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
#include "vm_version_x86.hpp"
// Implementation of class OrderAccess.
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
// no precompiled headers
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -33,7 +33,6 @@
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "mutex_solaris.inline.hpp"
-#include "nativeInst_x86.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
@@ -48,8 +47,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_solaris.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_x86/vm/threadLS_solaris_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_solaris.inline.hpp"
#ifdef AMD64
extern "C" Thread* fs_load(ptrdiff_t tlsOffset);
--- a/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_solaris.inline.hpp"
+#include "runtime/thread.inline.hpp"
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
--- a/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
*/
// no precompiled headers
-#include "assembler_x86.inline.hpp"
+#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -48,8 +48,8 @@
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
-#include "thread_windows.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
--- a/hotspot/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/windows_x86/vm/threadLS_windows_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "thread_windows.inline.hpp"
// Provides an entry point we can link against and
// a buffer we can emit code into. The buffer is
--- a/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/frame.inline.hpp"
-#include "thread_windows.inline.hpp"
+#include "runtime/thread.inline.hpp"
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
--- a/hotspot/src/share/tools/LogCompilation/README Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/tools/LogCompilation/README Mon Dec 17 08:30:06 2012 -0500
@@ -13,6 +13,6 @@
More information about the LogCompilation output can be found at
-http://wikis.sun.com/display/HotSpotInternals/LogCompilation+overview
-http://wikis.sun.com/display/HotSpotInternals/PrintCompilation
-http://wikis.sun.com/display/HotSpotInternals/LogCompilation+tool
+https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
+https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
+https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+tool
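
For context (not part of the changeset): the tool this README describes consumes the XML that HotSpot emits under -XX:+LogCompilation. A minimal, hypothetical Java driver for the command-line entry point touched later in this patch might look like the sketch below; the log file name is illustrative, and the -i/-S flags are taken from the usage text shown further down in this changeset.

    // Hypothetical driver: assumes the LogCompilation tool classes are on the
    // classpath and that a log produced with -XX:+LogCompilation is at hotspot.log.
    public class RunLogc {
        public static void main(String[] args) throws Exception {
            // Roughly equivalent to: LogCompilation -i -S hotspot.log
            // (-i: print inlining decisions, -S: print compilation statistics)
            com.sun.hotspot.tools.compiler.LogCompilation.main(
                new String[] { "-i", "-S", "hotspot.log" });
        }
    }
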
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java Mon Dec 17 08:30:06 2012 -0500
@@ -38,6 +38,7 @@
private String reason;
private List<CallSite> calls;
private int endNodes;
+ private int endLiveNodes;
private double timeStamp;
CallSite() {
@@ -106,7 +107,7 @@
}
}
if (getEndNodes() > 0) {
- stream.printf(" (end time: %6.4f nodes: %d)", getTimeStamp(), getEndNodes());
+ stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
}
stream.println("");
if (getReceiver() != null) {
@@ -195,6 +196,14 @@
return endNodes;
}
+ void setEndLiveNodes(int n) {
+ endLiveNodes = n;
+ }
+
+ public int getEndLiveNodes() {
+ return endLiveNodes;
+ }
+
void setTimeStamp(double time) {
timeStamp = time;
}
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogCompilation.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogCompilation.java Mon Dec 17 08:30:06 2012 -0500
@@ -37,13 +37,13 @@
public class LogCompilation extends DefaultHandler implements ErrorHandler, Constants {
public static void usage(int exitcode) {
- System.out.println("Usage: LogCompilation [ -v ] [ -c ] [ -s ] [ -e | -N ] file1 ...");
+ System.out.println("Usage: LogCompilation [ -v ] [ -c ] [ -s ] [ -e | -n ] file1 ...");
System.out.println(" -c: clean up malformed 1.5 xml");
System.out.println(" -i: print inlining decisions");
System.out.println(" -S: print compilation statistics");
System.out.println(" -s: sort events by start time");
System.out.println(" -e: sort events by elapsed time");
- System.out.println(" -N: sort events by name and start");
+ System.out.println(" -n: sort events by name and start");
System.exit(exitcode);
}
@@ -137,7 +137,11 @@
v2 = Integer.valueOf(0);
}
phaseNodes.put(phase.getName(), Integer.valueOf(v2.intValue() + phase.getNodes()));
- out.printf("\t%s %6.4f %d %d\n", phase.getName(), phase.getElapsedTime(), phase.getStartNodes(), phase.getNodes());
+ /* Print phase name, elapsed time, nodes at the start of the phase,
+ nodes created in the phase, live nodes at the start of the phase,
+ live nodes added in the phase.
+ */
+ out.printf("\t%s %6.4f %d %d %d %d\n", phase.getName(), phase.getElapsedTime(), phase.getStartNodes(), phase.getNodes(), phase.getStartLiveNodes(), phase.getLiveNodes());
}
} else if (e instanceof MakeNotEntrantEvent) {
MakeNotEntrantEvent mne = (MakeNotEntrantEvent) e;
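
The widened printf above now emits six columns per phase: name, elapsed time, nodes at phase start, nodes created, live nodes at phase start, and live nodes added. A standalone sketch of that line format follows; the numbers are made-up sample values, not real compiler output.

    // Standalone sketch of the widened per-phase line; values are hypothetical.
    public class PhaseLineSketch {
        public static void main(String[] args) {
            String name = "optimizer";            // hypothetical phase name
            double elapsed = 0.0123;              // seconds
            int startNodes = 1200, nodesAdded = 300;
            int startLive  = 1100, liveAdded  = 250;
            System.out.printf("\t%s %6.4f %d %d %d %d%n",
                name, elapsed, startNodes, nodesAdded, startLive, liveAdded);
        }
    }
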
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java Mon Dec 17 08:30:06 2012 -0500
@@ -224,7 +224,6 @@
throw new InternalError("can't find " + name);
}
int indent = 0;
- String compile_id;
String type(String id) {
String result = types.get(id);
@@ -268,12 +267,18 @@
if (qname.equals("phase")) {
Phase p = new Phase(search(atts, "name"),
Double.parseDouble(search(atts, "stamp")),
- Integer.parseInt(search(atts, "nodes")));
+ Integer.parseInt(search(atts, "nodes", "0")),
+ Integer.parseInt(search(atts, "live")));
phaseStack.push(p);
} else if (qname.equals("phase_done")) {
Phase p = phaseStack.pop();
- p.setEndNodes(Integer.parseInt(search(atts, "nodes")));
+ if (! p.getId().equals(search(atts, "name"))) {
+ System.out.println("phase: " + p.getId());
+ throw new InternalError("phase name mismatch");
+ }
p.setEnd(Double.parseDouble(search(atts, "stamp")));
+ p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
+ p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
compile.getPhases().add(p);
} else if (qname.equals("task")) {
compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@@ -317,13 +322,16 @@
m.setName(search(atts, "name"));
m.setReturnType(type(search(atts, "return")));
m.setArguments(search(atts, "arguments", "void"));
- m.setBytes(search(atts, "bytes"));
- m.setIICount(search(atts, "iicount"));
- m.setFlags(search(atts, "flags"));
+
+ if (search(atts, "unloaded", "0").equals("0")) {
+ m.setBytes(search(atts, "bytes"));
+ m.setIICount(search(atts, "iicount"));
+ m.setFlags(search(atts, "flags"));
+ }
methods.put(id, m);
} else if (qname.equals("call")) {
site = new CallSite(bci, method(search(atts, "method")));
- site.setCount(Integer.parseInt(search(atts, "count")));
+ site.setCount(Integer.parseInt(search(atts, "count", "0")));
String receiver = atts.getValue("receiver");
if (receiver != null) {
site.setReceiver(type(receiver));
@@ -406,6 +414,7 @@
} else if (qname.equals("parse_done")) {
CallSite call = scopes.pop();
call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
+ call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
scopes.push(call);
}
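
The parser changes above lean on the search(atts, name, default) overload so that logs from older VMs, which may omit the "nodes" or "live" attributes, still parse. A minimal SAX sketch of that defaulting idiom (not HotSpot's LogParser itself) is shown below; the element and attribute names match the fragments in this patch, and the "0" defaults are an assumption for illustration.

    // Minimal sketch of the attribute-with-default idiom used above, so <phase>
    // elements that lack a "nodes" or "live" attribute still parse cleanly.
    import org.xml.sax.Attributes;
    import org.xml.sax.helpers.DefaultHandler;

    class PhaseAttrSketch extends DefaultHandler {
        private static String search(Attributes atts, String name, String def) {
            String v = atts.getValue(name);
            return (v != null) ? v : def;
        }
        @Override
        public void startElement(String uri, String local, String qname, Attributes atts) {
            if (qname.equals("phase")) {
                int nodes = Integer.parseInt(search(atts, "nodes", "0"));
                int live  = Integer.parseInt(search(atts, "live",  "0"));
                System.out.println("phase start: nodes=" + nodes + " live=" + live);
            }
        }
    }
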
--- a/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/Phase.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/Phase.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,10 +30,13 @@
private final int startNodes;
private int endNodes;
+ private final int startLiveNodes;
+ private int endLiveNodes;
- Phase(String n, double s, int nodes) {
+ Phase(String n, double s, int nodes, int live) {
super(s, n);
startNodes = nodes;
+ startLiveNodes = live;
}
int getNodes() {
@@ -55,6 +58,22 @@
public int getEndNodes() {
return endNodes;
}
+ /* Number of live nodes added by the phase */
+ int getLiveNodes() {
+ return getEndLiveNodes() - getStartLiveNodes();
+ }
+
+ void setEndLiveNodes(int n) {
+ endLiveNodes = n;
+ }
+
+ public int getStartLiveNodes() {
+ return startLiveNodes;
+ }
+
+ public int getEndLiveNodes() {
+ return endLiveNodes;
+ }
@Override
public void print(PrintStream stream) {
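
The Phase bookkeeping added above records the live node count at the start and end of each phase, and getLiveNodes() reports the difference. A tiny illustrative class mirroring that accounting, with hypothetical counts, is sketched here.

    // Illustrative only: mirrors the live-node bookkeeping added to Phase above.
    // A phase records live nodes at its start and end; getLiveNodes() is the growth.
    class PhaseSketch {
        private final int startLiveNodes;
        private int endLiveNodes;

        PhaseSketch(int startLive)  { this.startLiveNodes = startLive; }
        void setEndLiveNodes(int n) { this.endLiveNodes = n; }
        int getLiveNodes()          { return endLiveNodes - startLiveNodes; }

        public static void main(String[] args) {
            PhaseSketch p = new PhaseSketch(1100);   // hypothetical counts
            p.setEndLiveNodes(1350);
            System.out.println("live nodes added: " + p.getLiveNodes()); // 250
        }
    }
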
--- a/hotspot/src/share/vm/adlc/main.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/adlc/main.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -212,7 +212,7 @@
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._VM_file._name));
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
- AD.addInclude(AD._CPP_file, "asm/assembler.hpp");
+ AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp");
AD.addInclude(AD._CPP_file, "code/vmreg.hpp");
AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp");
AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
@@ -231,17 +231,14 @@
AD.addInclude(AD._CPP_file, "runtime/stubRoutines.hpp");
AD.addInclude(AD._CPP_file, "utilities/growableArray.hpp");
#ifdef TARGET_ARCH_x86
- AD.addInclude(AD._CPP_file, "assembler_x86.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_x86.hpp");
AD.addInclude(AD._CPP_file, "vmreg_x86.inline.hpp");
#endif
#ifdef TARGET_ARCH_sparc
- AD.addInclude(AD._CPP_file, "assembler_sparc.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_sparc.hpp");
AD.addInclude(AD._CPP_file, "vmreg_sparc.inline.hpp");
#endif
#ifdef TARGET_ARCH_arm
- AD.addInclude(AD._CPP_file, "assembler_arm.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_arm.hpp");
AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
#endif
--- a/hotspot/src/share/vm/asm/assembler.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/asm/assembler.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,26 +23,13 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-#endif
// Implementation of AbstractAssembler
@@ -56,16 +43,13 @@
if (code == NULL) return;
CodeSection* cs = code->insts();
cs->clear_mark(); // new assembler kills old mark
- _code_section = cs;
- _code_begin = cs->start();
- _code_limit = cs->limit();
- _code_pos = cs->end();
- _oop_recorder= code->oop_recorder();
- DEBUG_ONLY( _short_branch_delta = 0; )
- if (_code_begin == NULL) {
+ if (cs->start() == NULL) {
vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
code->name()));
}
+ _code_section = cs;
+ _oop_recorder= code->oop_recorder();
+ DEBUG_ONLY( _short_branch_delta = 0; )
}
void AbstractAssembler::set_code_section(CodeSection* cs) {
@@ -73,9 +57,6 @@
assert(cs->is_allocated(), "need to pre-allocate this section");
cs->clear_mark(); // new assembly into this section kills old mark
_code_section = cs;
- _code_begin = cs->start();
- _code_limit = cs->limit();
- _code_pos = cs->end();
}
// Inform CodeBuffer that incoming code and relocation will be for stubs
@@ -83,7 +64,6 @@
CodeBuffer* cb = code();
CodeSection* cs = cb->stubs();
assert(_code_section == cb->insts(), "not in insts?");
- sync();
if (cs->maybe_expand_to_ensure_remaining(required_space)
&& cb->blob() == NULL) {
return NULL;
@@ -96,7 +76,6 @@
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
assert(_code_section == code()->stubs(), "not in stubs?");
- sync();
set_code_section(code()->insts());
}
@@ -104,8 +83,7 @@
address AbstractAssembler::start_a_const(int required_space, int required_align) {
CodeBuffer* cb = code();
CodeSection* cs = cb->consts();
- assert(_code_section == cb->insts(), "not in insts?");
- sync();
+ assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
address end = cs->end();
int pad = -(intptr_t)end & (required_align-1);
if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
@@ -121,16 +99,13 @@
}
// Inform CodeBuffer that incoming code and relocation will be code
-// Should not be called if start_a_const() returned NULL
-void AbstractAssembler::end_a_const() {
+// in section cs (insts or stubs).
+void AbstractAssembler::end_a_const(CodeSection* cs) {
assert(_code_section == code()->consts(), "not in consts?");
- sync();
- set_code_section(code()->insts());
+ set_code_section(cs);
}
-
void AbstractAssembler::flush() {
- sync();
ICache::invalidate_range(addr_at(0), offset());
}
--- a/hotspot/src/share/vm/asm/assembler.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/asm/assembler.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,12 +25,14 @@
#ifndef SHARE_VM_ASM_ASSEMBLER_HPP
#define SHARE_VM_ASM_ASSEMBLER_HPP
+#include "asm/codeBuffer.hpp"
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"
+
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
# include "vm_version_x86.hpp"
@@ -54,7 +56,6 @@
// This file contains platform-independent assembler declarations.
-class CodeBuffer;
class MacroAssembler;
class AbstractAssembler;
class Label;
@@ -122,7 +123,7 @@
assert(_loc == -1, "already bound");
_loc = loc;
}
- void bind_loc(int pos, int sect); // = bind_loc(locator(pos, sect))
+ void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }
#ifndef PRODUCT
// Iterates over all unresolved instructions for printing
@@ -137,8 +138,8 @@
assert(_loc >= 0, "unbound label");
return _loc;
}
- int loc_pos() const; // == locator_pos(loc())
- int loc_sect() const; // == locator_sect(loc())
+ int loc_pos() const { return CodeBuffer::locator_pos(loc()); }
+ int loc_sect() const { return CodeBuffer::locator_sect(loc()); }
bool is_bound() const { return _loc >= 0; }
bool is_unbound() const { return _loc == -1 && _patch_index > 0; }
@@ -201,26 +202,32 @@
protected:
CodeSection* _code_section; // section within the code buffer
- address _code_begin; // first byte of code buffer
- address _code_limit; // first byte after code buffer
- address _code_pos; // current code generation position
OopRecorder* _oop_recorder; // support for relocInfo::oop_type
// Code emission & accessing
- address addr_at(int pos) const { return _code_begin + pos; }
+ address addr_at(int pos) const { return code_section()->start() + pos; }
+
// This routine is called with a label is used for an address.
// Labels and displacements truck in offsets, but target must return a PC.
- address target(Label& L); // return _code_section->target(L)
+ address target(Label& L) { return code_section()->target(L, pc()); }
bool is8bit(int x) const { return -0x80 <= x && x < 0x80; }
bool isByte(int x) const { return 0 <= x && x < 0x100; }
bool isShiftCount(int x) const { return 0 <= x && x < 32; }
- void emit_byte(int x); // emit a single byte
- void emit_word(int x); // emit a 16-bit word (not a wordSize word!)
- void emit_long(jint x); // emit a 32-bit word (not a longSize word!)
- void emit_address(address x); // emit an address (not a longSize word!)
+ void emit_int8( int8_t x) { code_section()->emit_int8( x); }
+ void emit_int16( int16_t x) { code_section()->emit_int16( x); }
+ void emit_int32( int32_t x) { code_section()->emit_int32( x); }
+ void emit_int64( int64_t x) { code_section()->emit_int64( x); }
+
+ void emit_float( jfloat x) { code_section()->emit_float( x); }
+ void emit_double( jdouble x) { code_section()->emit_double( x); }
+ void emit_address(address x) { code_section()->emit_address(x); }
+
+ void emit_byte(int x) { emit_int8 (x); } // deprecated
+ void emit_word(int x) { emit_int16(x); } // deprecated
+ void emit_long(jint x) { emit_int32(x); } // deprecated
// Instruction boundaries (required when emitting relocatable values).
class InstructionMark: public StackObj {
@@ -237,10 +244,10 @@
}
};
friend class InstructionMark;
- #ifdef ASSERT
+#ifdef ASSERT
// Make it return true on platforms which need to verify
// instruction boundaries for some operations.
- inline static bool pd_check_instruction_mark();
+ static bool pd_check_instruction_mark();
// Add delta to short branch distance to verify that it still fit into imm8.
int _short_branch_delta;
@@ -262,13 +269,13 @@
_assm->clear_short_branch_delta();
}
};
- #else
+#else
// Dummy in product.
class ShortBranchVerifier: public StackObj {
public:
ShortBranchVerifier(AbstractAssembler* assm) {}
};
- #endif
+#endif
// Label functions
void print(Label& L);
@@ -278,9 +285,6 @@
// Creation
AbstractAssembler(CodeBuffer* code);
- // save end pointer back to code buf.
- void sync();
-
// ensure buf contains all code (call this before using/copying the code)
void flush();
@@ -308,26 +312,31 @@
static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
// Accessors
- CodeBuffer* code() const; // _code_section->outer()
CodeSection* code_section() const { return _code_section; }
- int sect() const; // return _code_section->index()
- address pc() const { return _code_pos; }
- int offset() const { return _code_pos - _code_begin; }
- int locator() const; // CodeBuffer::locator(offset(), sect())
+ CodeBuffer* code() const { return code_section()->outer(); }
+ int sect() const { return code_section()->index(); }
+ address pc() const { return code_section()->end(); }
+ int offset() const { return code_section()->size(); }
+ int locator() const { return CodeBuffer::locator(offset(), sect()); }
+
OopRecorder* oop_recorder() const { return _oop_recorder; }
void set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }
- address inst_mark() const;
- void set_inst_mark();
- void clear_inst_mark();
+ address inst_mark() const { return code_section()->mark(); }
+ void set_inst_mark() { code_section()->set_mark(); }
+ void clear_inst_mark() { code_section()->clear_mark(); }
// Constants in code
void a_byte(int x);
void a_long(jint x);
- void relocate(RelocationHolder const& rspec, int format = 0);
+ void relocate(RelocationHolder const& rspec, int format = 0) {
+ assert(!pd_check_instruction_mark()
+ || inst_mark() == NULL || inst_mark() == code_section()->end(),
+ "call relocate() between instructions");
+ code_section()->relocate(code_section()->end(), rspec, format);
+ }
void relocate( relocInfo::relocType rtype, int format = 0) {
- if (rtype != relocInfo::none)
- relocate(Relocation::spec_simple(rtype), format);
+ code_section()->relocate(code_section()->end(), rtype, format);
}
static int code_fill_byte(); // used to pad out odd-sized code buffers
@@ -348,52 +357,55 @@
void end_a_stub();
// Ditto for constants.
address start_a_const(int required_space, int required_align = sizeof(double));
- void end_a_const();
+ void end_a_const(CodeSection* cs); // Pass the codesection to continue in (insts or stubs?).
// constants support
+ //
+ // We must remember the code section (insts or stubs) in c1
+ // so we can reset to the proper section in end_a_const().
address long_constant(jlong c) {
+ CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(jlong*)ptr = c;
- _code_pos = ptr + sizeof(c);
- end_a_const();
+ emit_int64(c);
+ end_a_const(c1);
}
return ptr;
}
address double_constant(jdouble c) {
+ CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(jdouble*)ptr = c;
- _code_pos = ptr + sizeof(c);
- end_a_const();
+ emit_double(c);
+ end_a_const(c1);
}
return ptr;
}
address float_constant(jfloat c) {
+ CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(jfloat*)ptr = c;
- _code_pos = ptr + sizeof(c);
- end_a_const();
+ emit_float(c);
+ end_a_const(c1);
}
return ptr;
}
address address_constant(address c) {
+ CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
- *(address*)ptr = c;
- _code_pos = ptr + sizeof(c);
- end_a_const();
+ emit_address(c);
+ end_a_const(c1);
}
return ptr;
}
address address_constant(address c, RelocationHolder const& rspec) {
+ CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
relocate(rspec);
- *(address*)ptr = c;
- _code_pos = ptr + sizeof(c);
- end_a_const();
+ emit_address(c);
+ end_a_const(c1);
}
return ptr;
}
--- a/hotspot/src/share/vm/asm/assembler.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/asm/assembler.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,92 +26,21 @@
#define SHARE_VM_ASM_ASSEMBLER_INLINE_HPP
#include "asm/assembler.hpp"
-#include "asm/codeBuffer.hpp"
-#include "compiler/disassembler.hpp"
-#include "runtime/threadLocalStorage.hpp"
-inline void AbstractAssembler::sync() {
- CodeSection* cs = code_section();
- guarantee(cs->start() == _code_begin, "must not shift code buffer");
- cs->set_end(_code_pos);
-}
-
-inline void AbstractAssembler::emit_byte(int x) {
- assert(isByte(x), "not a byte");
- *(unsigned char*)_code_pos = (unsigned char)x;
- _code_pos += sizeof(unsigned char);
- sync();
-}
-
-
-inline void AbstractAssembler::emit_word(int x) {
- *(short*)_code_pos = (short)x;
- _code_pos += sizeof(short);
- sync();
-}
-
-
-inline void AbstractAssembler::emit_long(jint x) {
- *(jint*)_code_pos = x;
- _code_pos += sizeof(jint);
- sync();
-}
-
-inline void AbstractAssembler::emit_address(address x) {
- *(address*)_code_pos = x;
- _code_pos += sizeof(address);
- sync();
-}
-
-inline address AbstractAssembler::inst_mark() const {
- return code_section()->mark();
-}
-
-
-inline void AbstractAssembler::set_inst_mark() {
- code_section()->set_mark();
-}
-
-
-inline void AbstractAssembler::clear_inst_mark() {
- code_section()->clear_mark();
-}
-
-
-inline void AbstractAssembler::relocate(RelocationHolder const& rspec, int format) {
- assert(!pd_check_instruction_mark()
- || inst_mark() == NULL || inst_mark() == _code_pos,
- "call relocate() between instructions");
- code_section()->relocate(_code_pos, rspec, format);
-}
-
-
-inline CodeBuffer* AbstractAssembler::code() const {
- return code_section()->outer();
-}
-
-inline int AbstractAssembler::sect() const {
- return code_section()->index();
-}
-
-inline int AbstractAssembler::locator() const {
- return CodeBuffer::locator(offset(), sect());
-}
-
-inline address AbstractAssembler::target(Label& L) {
- return code_section()->target(L, pc());
-}
-
-inline int Label::loc_pos() const {
- return CodeBuffer::locator_pos(loc());
-}
-
-inline int Label::loc_sect() const {
- return CodeBuffer::locator_sect(loc());
-}
-
-inline void Label::bind_loc(int pos, int sect) {
- bind_loc(CodeBuffer::locator(pos, sect));
-}
+#ifdef TARGET_ARCH_x86
+# include "assembler_x86.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "assembler_sparc.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "assembler_zero.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "assembler_arm.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "assembler_ppc.inline.hpp"
+#endif
#endif // SHARE_VM_ASM_ASSEMBLER_INLINE_HPP
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -254,6 +254,10 @@
return start + locator_pos(locator);
}
+bool CodeBuffer::is_backward_branch(Label& L) {
+ return L.is_bound() && insts_end() <= locator_address(L.loc());
+}
+
address CodeBuffer::decode_begin() {
address begin = _insts.start();
if (_decode_begin != NULL && _decode_begin > begin)
@@ -492,6 +496,26 @@
dest->verify_section_allocation();
}
+// Anonymous classes need mirror to keep the metadata alive but
+// for regular classes, the class_loader is sufficient.
+static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
+ if (k->oop_is_instance()) {
+ InstanceKlass* ik = InstanceKlass::cast(k);
+ if (ik->is_anonymous()) {
+ oop o = ik->java_mirror();
+ assert (o != NULL, "should have a mirror");
+ if (!oops->contains(o)) {
+ oops->append(o);
+ }
+ return; // only need the mirror
+ }
+ }
+ oop cl = k->class_loader();
+ if (cl != NULL && !oops->contains(cl)) {
+ oops->append(cl);
+ }
+}
+
void CodeBuffer::finalize_oop_references(methodHandle mh) {
No_Safepoint_Verifier nsv;
@@ -509,7 +533,6 @@
if (md->metadata_is_immediate()) {
Metadata* m = md->metadata_value();
if (oop_recorder()->is_real(m)) {
- oop o = NULL;
if (m->is_methodData()) {
m = ((MethodData*)m)->method();
}
@@ -517,16 +540,13 @@
m = ((Method*)m)->method_holder();
}
if (m->is_klass()) {
- o = ((Klass*)m)->class_loader();
+ append_oop_references(&oops, (Klass*)m);
} else {
// XXX This will currently occur for MDO which don't
// have a backpointer. This has to be fixed later.
m->print();
ShouldNotReachHere();
}
- if (o != NULL && oops.find(o) == -1) {
- oops.append(o);
- }
}
}
}
@@ -537,7 +557,6 @@
for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
Metadata* m = oop_recorder()->metadata_at(i);
if (oop_recorder()->is_real(m)) {
- oop o = NULL;
if (m->is_methodData()) {
m = ((MethodData*)m)->method();
}
@@ -545,24 +564,18 @@
m = ((Method*)m)->method_holder();
}
if (m->is_klass()) {
- o = ((Klass*)m)->class_loader();
+ append_oop_references(&oops, (Klass*)m);
} else {
m->print();
ShouldNotReachHere();
}
- if (o != NULL && oops.find(o) == -1) {
- oops.append(o);
- }
}
}
}
// Add the class loader of Method* for the nmethod itself
- oop cl = mh->method_holder()->class_loader();
- if (cl != NULL) {
- oops.append(cl);
- }
+ append_oop_references(&oops, mh->method_holder());
// Add any oops that we've found
Thread* thread = Thread::current();
@@ -749,7 +762,18 @@
// Make the new code copy use the old copy's relocations:
dest_cs->initialize_locs_from(cs);
+ }
+ // Do relocation after all sections are copied.
+ // This is necessary if the code uses constants in stubs, which are
+ // relocated when the corresponding instruction in the code (e.g., a
+ // call) is relocated. Stubs are placed behind the main code
+ // section, so that section has to be copied before relocating.
+ for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
+ // pull code out of each section
+ const CodeSection* cs = code_section(n);
+ if (cs->is_empty()) continue; // skip trivial section
+ CodeSection* dest_cs = dest->code_section(n);
{ // Repair the pc relative information in the code after the move
RelocIterator iter(dest_cs);
while (iter.next()) {
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,17 +25,15 @@
#ifndef SHARE_VM_ASM_CODEBUFFER_HPP
#define SHARE_VM_ASM_CODEBUFFER_HPP
-#include "asm/assembler.hpp"
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
-class CodeComments;
-class AbstractAssembler;
-class MacroAssembler;
-class PhaseCFG;
-class Compile;
-class BufferBlob;
-class CodeBuffer;
+class CodeComments;
+class PhaseCFG;
+class Compile;
+class BufferBlob;
+class CodeBuffer;
+class Label;
class CodeOffsets: public StackObj {
public:
@@ -194,10 +192,14 @@
}
// Code emission
- void emit_int8 (int8_t x) { *((int8_t*) end()) = x; set_end(end() + 1); }
- void emit_int16(int16_t x) { *((int16_t*) end()) = x; set_end(end() + 2); }
- void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + 4); }
- void emit_int64(int64_t x) { *((int64_t*) end()) = x; set_end(end() + 8); }
+ void emit_int8 ( int8_t x) { *((int8_t*) end()) = x; set_end(end() + sizeof(int8_t)); }
+ void emit_int16( int16_t x) { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
+ void emit_int32( int32_t x) { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
+ void emit_int64( int64_t x) { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }
+
+ void emit_float( jfloat x) { *((jfloat*) end()) = x; set_end(end() + sizeof(jfloat)); }
+ void emit_double(jdouble x) { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
+ void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }
// Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.)
void initialize_shared_locs(relocInfo* buf, int length);
@@ -451,6 +453,9 @@
int locator(address addr) const;
address locator_address(int locator) const;
+ // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
+ bool is_backward_branch(Label& L);
+
// Properties
const char* name() const { return _name; }
CodeBuffer* before_expand() const { return _before_expand; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/asm/macroAssembler.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_ASM_MACROASSEMBLER_HPP
+#define SHARE_VM_ASM_MACROASSEMBLER_HPP
+
+#include "asm/assembler.hpp"
+
+#ifdef TARGET_ARCH_x86
+# include "macroAssembler_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "macroAssembler_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "assembler_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "assembler_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "assembler_ppc.hpp"
+#endif
+
+#endif // SHARE_VM_ASM_MACROASSEMBLER_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/asm/macroAssembler.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_ASM_MACROASSEMBLER_INLINE_HPP
+#define SHARE_VM_ASM_MACROASSEMBLER_INLINE_HPP
+
+#include "asm/macroAssembler.hpp"
+
+#ifdef TARGET_ARCH_x86
+// no macroAssembler_x86.inline.hpp
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "macroAssembler_sparc.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "assembler_zero.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "assembler_arm.inline.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "assembler_ppc.inline.hpp"
+#endif
+
+#endif // SHARE_VM_ASM_MACROASSEMBLER_INLINE_HPP
--- a/hotspot/src/share/vm/asm/register.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/asm/register.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -93,6 +93,21 @@
#define REGISTER_DEFINITION(type, name) \
const type name = ((type)name##_##type##EnumValue)
+#ifdef TARGET_ARCH_x86
+# include "register_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "register_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "register_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "register_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "register_ppc.hpp"
+#endif
// Debugging support
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -129,7 +129,15 @@
CHECK_BAILOUT();
// setup ir
+ CompileLog* log = this->log();
+ if (log != NULL) {
+ log->begin_head("parse method='%d' ",
+ log->identify(_method));
+ log->stamp();
+ log->end_head();
+ }
_hir = new IR(this, method(), osr_bci());
+ if (log) log->done("parse");
if (!_hir->is_valid()) {
bailout("invalid parsing");
return;
--- a/hotspot/src/share/vm/c1/c1_MacroAssembler.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/c1/c1_MacroAssembler.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,22 +25,8 @@
#ifndef SHARE_VM_C1_C1_MACROASSEMBLER_HPP
#define SHARE_VM_C1_C1_MACROASSEMBLER_HPP
-#include "asm/assembler.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-#endif
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
class CodeEmitInfo;
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -742,6 +742,24 @@
}
// ------------------------------------------------------------------
+// ciMethod::get_field_at_bci
+ciField* ciMethod::get_field_at_bci(int bci, bool &will_link) {
+ ciBytecodeStream iter(this);
+ iter.reset_to_bci(bci);
+ iter.next();
+ return iter.get_field(will_link);
+}
+
+// ------------------------------------------------------------------
+// ciMethod::get_method_at_bci
+ciMethod* ciMethod::get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature) {
+ ciBytecodeStream iter(this);
+ iter.reset_to_bci(bci);
+ iter.next();
+ return iter.get_method(will_link, declared_signature);
+}
+
+// ------------------------------------------------------------------
// Adjust a CounterData count to be commensurate with
// interpreter_invocation_count. If the MDO exists for
// only 25% of the time the method exists, then the
--- a/hotspot/src/share/vm/ci/ciMethod.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -226,6 +226,9 @@
ciCallProfile call_profile_at_bci(int bci);
int interpreter_call_site_count(int bci);
+ ciField* get_field_at_bci( int bci, bool &will_link);
+ ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
+
// Given a certain calling environment, find the monomorphic target
// for the call. Return NULL if the call is not monomorphic in
// its calling environment.
--- a/hotspot/src/share/vm/ci/ciReplay.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,7 +31,7 @@
#include "memory/resourceArea.hpp"
#include "utilities/copy.hpp"
-#ifdef ASSERT
+#ifndef PRODUCT
// ciReplay
@@ -939,4 +939,4 @@
ciMethodRecord* rec = replay_state->find_ciMethodRecord(method);
return rec != NULL;
}
-#endif
+#endif // PRODUCT
--- a/hotspot/src/share/vm/ci/ciReplay.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/ci/ciReplay.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -32,7 +32,7 @@
class ciReplay {
CI_PACKAGE_ACCESS
-#ifdef ASSERT
+#ifndef PRODUCT
private:
static int replay_impl(TRAPS);
--- a/hotspot/src/share/vm/ci/ciSignature.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/ci/ciSignature.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -57,12 +57,14 @@
ciSymbol* as_symbol() const { return _symbol; }
ciKlass* accessing_klass() const { return _accessing_klass; }
- ciType* return_type() const;
- ciType* type_at(int index) const;
+ ciType* return_type() const;
+ ciType* type_at(int index) const;
int size() const { return _size; }
int count() const { return _count; }
+ int arg_size_for_bc(Bytecodes::Code bc) { return size() + (Bytecodes::has_receiver(bc) ? 1 : 0); }
+
bool equals(ciSignature* that);
void print_signature();
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -2184,7 +2184,7 @@
Method* m = Method::allocate(
loader_data, code_length, access_flags, linenumber_table_length,
total_lvt_length, exception_table_length, checked_exceptions_length,
- ConstMethod::NORMAL, CHECK_(nullHandle));
+ generic_signature_index, ConstMethod::NORMAL, CHECK_(nullHandle));
ClassLoadingService::add_class_method_size(m->size()*HeapWordSize);
@@ -2192,7 +2192,6 @@
m->set_constants(cp());
m->set_name_index(name_index);
m->set_signature_index(signature_index);
- m->set_generic_signature_index(generic_signature_index);
#ifdef CC_INTERP
// hmm is there a gc issue here??
ResultTypeFinder rtf(cp->symbol_at(signature_index));
@@ -2950,7 +2949,7 @@
instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
- Handle class_loader,
+ ClassLoaderData* loader_data,
Handle protection_domain,
KlassHandle host_klass,
GrowableArray<Handle>* cp_patches,
@@ -2964,7 +2963,7 @@
// original class bytes.
unsigned char *cached_class_file_bytes = NULL;
jint cached_class_file_length;
- ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
+ Handle class_loader(THREAD, loader_data->class_loader());
bool has_default_methods = false;
ResourceMark rm(THREAD);
@@ -3005,7 +3004,7 @@
unsigned char* ptr = cfs->buffer();
unsigned char* end_ptr = cfs->buffer() + cfs->length();
- JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain,
+ JvmtiExport::post_class_file_load_hook(name, class_loader(), protection_domain,
&ptr, &end_ptr,
&cached_class_file_bytes,
&cached_class_file_length);
@@ -4004,8 +4003,7 @@
assert(k->size_helper() > 0, "layout_helper is initialized");
if ((!RegisterFinalizersAtInit && k->has_finalizer())
|| k->is_abstract() || k->is_interface()
- || (k->name() == vmSymbols::java_lang_Class()
- && k->class_loader_data()->is_the_null_class_loader_data())
+ || (k->name() == vmSymbols::java_lang_Class() && k->class_loader() == NULL)
|| k->size_helper() >= FastAllocateSizeLimit) {
// Forbid fast-path allocation.
jint lh = Klass::instance_layout_helper(k->size_helper(), true);
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -363,16 +363,16 @@
// "parsed_name" is updated by this method, and is the name found
// while parsing the stream.
instanceKlassHandle parseClassFile(Symbol* name,
- Handle class_loader,
+ ClassLoaderData* loader_data,
Handle protection_domain,
TempNewSymbol& parsed_name,
bool verify,
TRAPS) {
KlassHandle no_host_klass;
- return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD);
+ return parseClassFile(name, loader_data, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD);
}
instanceKlassHandle parseClassFile(Symbol* name,
- Handle class_loader,
+ ClassLoaderData* loader_data,
Handle protection_domain,
KlassHandle host_klass,
GrowableArray<Handle>* cp_patches,
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,6 +26,7 @@
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -910,11 +911,11 @@
// class file found, parse it
ClassFileParser parser(stream);
- Handle class_loader;
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
Handle protection_domain;
TempNewSymbol parsed_name = NULL;
instanceKlassHandle result = parser.parseClassFile(h_name,
- class_loader,
+ loader_data,
protection_domain,
parsed_name,
false,
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -65,13 +65,19 @@
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
ClassLoaderData::ClassLoaderData(Handle h_class_loader) : _class_loader(h_class_loader()),
- _metaspace(NULL), _unloading(false), _klasses(NULL),
- _claimed(0), _jmethod_ids(NULL), _handles(NULL),
- _deallocate_list(NULL), _next(NULL),
+ _metaspace(NULL), _unloading(false), _keep_alive(false), _klasses(NULL),
+ _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
+ _next(NULL), _dependencies(NULL),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
// empty
}
+void ClassLoaderData::init_dependencies(TRAPS) {
+ // Create empty dependencies array to add to. CMS requires this to be
+ // an oop so that it can track additions via card marks. We think.
+ _dependencies = (oop)oopFactory::new_objectArray(2, CHECK);
+}
+
bool ClassLoaderData::claim() {
if (_claimed == 1) {
return false;
@@ -86,6 +92,7 @@
}
f->do_oop(&_class_loader);
+ f->do_oop(&_dependencies);
_handles->oops_do(f);
if (klass_closure != NULL) {
classes_do(klass_closure);
@@ -110,70 +117,100 @@
ClassLoaderData * const from_cld = this;
ClassLoaderData * const to_cld = k->class_loader_data();
- // Records dependency between non-null class loaders only.
- if (to_cld->is_the_null_class_loader_data() || from_cld->is_the_null_class_loader_data()) {
+ // Dependency to the null class loader data doesn't need to be recorded
+ // because the null class loader data never goes away.
+ if (to_cld->is_the_null_class_loader_data()) {
return;
}
- // Check that this dependency isn't from the same or parent class_loader
- oop to = to_cld->class_loader();
- oop from = from_cld->class_loader();
+ oop to;
+ if (to_cld->is_anonymous()) {
+ // Anonymous class dependencies are through the mirror.
+ to = k->java_mirror();
+ } else {
+ to = to_cld->class_loader();
- oop curr = from;
- while (curr != NULL) {
- if (curr == to) {
- return; // this class loader is in the parent list, no need to add it.
+ // If from_cld is anonymous, even if it's class_loader is a parent of 'to'
+ // we still have to add it. The class_loader won't keep from_cld alive.
+ if (!from_cld->is_anonymous()) {
+ // Check that this dependency isn't from the same or parent class_loader
+ oop from = from_cld->class_loader();
+
+ oop curr = from;
+ while (curr != NULL) {
+ if (curr == to) {
+ return; // this class loader is in the parent list, no need to add it.
+ }
+ curr = java_lang_ClassLoader::parent(curr);
+ }
}
- curr = java_lang_ClassLoader::parent(curr);
}
// It's a dependency we won't find through GC, add it. This is relatively rare
- from_cld->add_dependency(to_cld, CHECK);
+ // Must handle over GC point.
+ Handle dependency(THREAD, to);
+ from_cld->add_dependency(dependency, CHECK);
}
-bool ClassLoaderData::has_dependency(ClassLoaderData* dependency) {
- oop loader = dependency->class_loader();
- // Get objArrayOop out of the class_loader oop and see if this dependency
- // is there. Don't safepoint! These are all oops.
- // Dependency list is (oop class_loader, objArrayOop next)
- objArrayOop ok = (objArrayOop)java_lang_ClassLoader::dependencies(class_loader());
+void ClassLoaderData::add_dependency(Handle dependency, TRAPS) {
+ // Check first if this dependency is already in the list.
+ // Save a pointer to the last to add to under the lock.
+ objArrayOop ok = (objArrayOop)_dependencies;
+ objArrayOop last = NULL;
while (ok != NULL) {
- if (ok->obj_at(0) == loader) {
- return true;
+ last = ok;
+ if (ok->obj_at(0) == dependency()) {
+ // Don't need to add it
+ return;
}
ok = (objArrayOop)ok->obj_at(1);
}
- return false;
+
+ // Create a new dependency node with fields for (class_loader or mirror, next)
+ objArrayOop deps = oopFactory::new_objectArray(2, CHECK);
+ deps->obj_at_put(0, dependency());
+
+ // Must handle over more GC points
+ objArrayHandle new_dependency(THREAD, deps);
+
+ // Add the dependency under lock
+ assert (last != NULL, "dependencies should be initialized");
+ objArrayHandle last_handle(THREAD, last);
+ locked_add_dependency(last_handle, new_dependency);
}
-void ClassLoaderData::add_dependency(ClassLoaderData* dependency, TRAPS) {
- // Minimize the number of duplicates in the list.
- if (has_dependency(dependency)) {
- return;
- }
+void ClassLoaderData::locked_add_dependency(objArrayHandle last_handle,
+ objArrayHandle new_dependency) {
- // Create a new dependency node with fields for (class_loader, next)
- objArrayOop deps = oopFactory::new_objectArray(2, CHECK);
- deps->obj_at_put(0, dependency->class_loader());
+ // Have to lock and put the new dependency on the end of the dependency
+ // array so the card mark for CMS sees that this dependency is new.
+ // Can probably do this lock free with some effort.
+ MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+
+ oop loader_or_mirror = new_dependency->obj_at(0);
- // Add this lock free, using compare and exchange, need barriers for GC
- // Do the barrier first.
- HeapWord* addr = java_lang_ClassLoader::dependencies_addr(class_loader());
- while (true) {
- oop old_dependency = java_lang_ClassLoader::dependencies(class_loader());
- deps->obj_at_put(1, old_dependency);
-
- oop newold = oopDesc::atomic_compare_exchange_oop((oop)deps, addr, old_dependency, true);
- if (newold == old_dependency) {
- update_barrier_set((void*)addr, (oop)deps);
- // we won the race to add this dependency
- break;
+ // Since the dependencies are only added, add to the end.
+ objArrayOop end = last_handle();
+ objArrayOop last = NULL;
+ while (end != NULL) {
+ last = end;
+ // check again if another thread added it to the end.
+ if (end->obj_at(0) == loader_or_mirror) {
+ // Don't need to add it
+ return;
}
+ end = (objArrayOop)end->obj_at(1);
+ }
+ assert (last != NULL, "dependencies should be initialized");
+ // fill in the first element with the oop in new_dependency.
+ if (last->obj_at(0) == NULL) {
+ last->obj_at_put(0, new_dependency->obj_at(0));
+ } else {
+ last->obj_at_put(1, new_dependency());
}
}
-
void ClassLoaderDataGraph::clear_claimed_marks() {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->clear_claimed();
@@ -187,7 +224,7 @@
// link the new item into the list
_klasses = k;
- if (TraceClassLoaderData && k->class_loader_data() != NULL) {
+ if (TraceClassLoaderData && Verbose && k->class_loader_data() != NULL) {
ResourceMark rm;
tty->print_cr("[TraceClassLoaderData] Adding k: " PTR_FORMAT " %s to CLD: "
PTR_FORMAT " loader: " PTR_FORMAT " %s",
@@ -195,8 +232,7 @@
k->external_name(),
k->class_loader_data(),
k->class_loader(),
- k->class_loader() != NULL ? k->class_loader()->klass()->external_name() : "NULL"
- );
+ loader_name());
}
}
@@ -221,6 +257,38 @@
ShouldNotReachHere(); // should have found this class!!
}
+
+bool ClassLoaderData::is_anonymous() const {
+ Klass* k = _klasses;
+ return (_keep_alive || (k != NULL && k->oop_is_instance() &&
+ InstanceKlass::cast(k)->is_anonymous()));
+}
+
+void ClassLoaderData::unload() {
+ _unloading = true;
+
+ if (TraceClassLoaderData) {
+ ResourceMark rm;
+ tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
+ tty->print(" for instance "PTR_FORMAT" of %s", class_loader(),
+ loader_name());
+ if (is_anonymous()) {
+ tty->print(" for anonymous class "PTR_FORMAT " ", _klasses);
+ }
+ tty->print_cr("]");
+ }
+}
+
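+// An anonymous CLD is alive as long as the mirror of its class is reachable;
+// any other CLD is alive if its class loader oop is reachable (a NULL loader
+// is the boot loader, which never dies).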
+bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
+ bool alive =
+ is_anonymous() ?
+ is_alive_closure->do_object_b(_klasses->java_mirror()) :
+ class_loader() == NULL || is_alive_closure->do_object_b(class_loader());
+ assert(!alive || claimed(), "must be claimed");
+ return alive;
+}
+
+
ClassLoaderData::~ClassLoaderData() {
Metaspace *m = _metaspace;
if (m != NULL) {
@@ -263,8 +331,8 @@
if (_metaspace != NULL) {
return _metaspace;
}
- if (class_loader() == NULL) {
- assert(this == the_null_class_loader_data(), "Must be");
+ if (this == the_null_class_loader_data()) {
+ assert (class_loader() == NULL, "Must be");
size_t word_size = Metaspace::first_chunk_word_size();
set_metaspace(new Metaspace(_metaspace_lock, word_size));
} else {
@@ -325,12 +393,19 @@
}
}
-#ifndef PRODUCT
-void ClassLoaderData::print_loader(ClassLoaderData *loader_data, outputStream* out) {
- oop class_loader = loader_data->class_loader();
- out->print("%s", SystemDictionary::loader_name(class_loader));
+// These anonymous class loader data hold the classes generated for JSR 292 (invokedynamic)
+ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
+ // Add a new class loader data to the graph.
+ ClassLoaderData* cld = ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
+ return cld;
}
+const char* ClassLoaderData::loader_name() {
+ // Handles null class loader
+ return SystemDictionary::loader_name(class_loader());
+}
+
+#ifndef PRODUCT
// Define to dump klasses
#undef CLD_DUMP_KLASSES
@@ -338,8 +413,7 @@
ResourceMark rm;
out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {",
this, class_loader(),
- class_loader() != NULL ? class_loader()->klass() : NULL,
- class_loader() != NULL ? class_loader()->klass()->external_name() : "NULL");
+ class_loader() != NULL ? class_loader()->klass() : NULL, loader_name());
if (claimed()) out->print(" claimed ");
if (is_unloading()) out->print(" unloading ");
out->print(" handles " INTPTR_FORMAT, handles());
@@ -373,8 +447,8 @@
void ClassLoaderData::verify() {
oop cl = class_loader();
- guarantee(this == class_loader_data(cl), "Must be the same");
- guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data(), "must be");
+ guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
+ guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_anonymous(), "must be");
// Verify the integrity of the allocated space.
if (metaspace_or_null() != NULL) {
@@ -387,6 +461,7 @@
}
}
+
// GC root of class loader data created.
ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
@@ -395,19 +470,25 @@
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader_data) {
+ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) {
// Not assigned a class loader data yet.
// Create one.
ClassLoaderData* *list_head = &_head;
ClassLoaderData* next = _head;
- ClassLoaderData* cld = new ClassLoaderData(loader_data);
+ ClassLoaderData* cld = new ClassLoaderData(loader);
- // First, Atomically set it.
- ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
- if (old != NULL) {
- delete cld;
- // Returns the data.
- return old;
+ if (cld_addr != NULL) {
+ // First, Atomically set it
+ ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
+ if (old != NULL) {
+ delete cld;
+ // Returns the data.
+ return old;
+ }
+ } else {
+ // Disallow unloading for this CLD during initialization if there is no
+ // class_loader oop to link this to.
+ cld->set_keep_alive(true);
}
// We won the race, and therefore the task of adding the data to the list of
@@ -417,16 +498,22 @@
ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
if (exchanged == next) {
if (TraceClassLoaderData) {
+ ResourceMark rm;
tty->print("[ClassLoaderData: ");
tty->print("create class loader data "PTR_FORMAT, cld);
- tty->print(" for instance "PTR_FORMAT" of ", cld->class_loader());
- loader_data->klass()->name()->print_symbol_on(tty);
+ tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(),
+ cld->loader_name());
tty->print_cr("]");
}
+ // Create dependencies after the CLD is added to the list. Otherwise,
+ // the GC will not find the CLD and the _class_loader field will
+ // not be updated.
+ cld->init_dependencies(CHECK_NULL);
return cld;
}
next = exchanged;
} while (true);
+
}
void ClassLoaderDataGraph::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
@@ -435,9 +522,19 @@
}
}
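+// Apply the closures only to CLDs that are marked keep_alive, so their oops
+// are treated as strong roots even when class unloading is enabled.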
+void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
+ for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ if (cld->keep_alive()) {
+ cld->oops_do(f, klass_closure, must_claim);
+ }
+ }
+}
+
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
if (ClassUnloading) {
ClassLoaderData::the_null_class_loader_data()->oops_do(f, klass_closure, must_claim);
+ // keep any special CLDs alive.
+ ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
} else {
ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
}
@@ -516,9 +613,10 @@
}
#endif // PRODUCT
+
// Move class loader data from main list to the unloaded list for unloading
// and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive) {
+bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
ClassLoaderData* data = _head;
ClassLoaderData* prev = NULL;
bool seen_dead_loader = false;
@@ -527,8 +625,7 @@
bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
MetadataOnStackMark md_on_stack;
while (data != NULL) {
- if (data->class_loader() == NULL || is_alive->do_object_b(data->class_loader())) {
- assert(data->claimed(), "class loader data must have been claimed");
+ if (data->keep_alive() || data->is_alive(is_alive_closure)) {
if (has_redefined_a_class) {
data->classes_do(InstanceKlass::purge_previous_versions);
}
@@ -539,13 +636,7 @@
}
seen_dead_loader = true;
ClassLoaderData* dead = data;
- dead->mark_for_unload();
- if (TraceClassLoaderData) {
- tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, dead);
- tty->print(" for instance "PTR_FORMAT" of ", dead->class_loader());
- dead->class_loader()->klass()->name()->print_symbol_on(tty);
- tty->print_cr("]");
- }
+ dead->unload();
data = data->next();
// Remove from loader list.
if (prev != NULL) {
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -62,13 +62,14 @@
// CMS support.
static ClassLoaderData* _saved_head;
- static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader);
+ static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS);
public:
- static ClassLoaderData* find_or_create(Handle class_loader);
+ static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
static void clear_claimed_marks();
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
+ static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void classes_do(KlassClosure* klass_closure);
static bool do_unloading(BoolObjectClosure* is_alive);
@@ -101,10 +102,13 @@
oop _class_loader; // oop used to uniquely identify a class loader
// class loader or a canonical class path
+ oop _dependencies; // oop to hold dependencies from this class loader
+ // data to others.
Metaspace * _metaspace; // Meta-space where meta-data defined by the
// classes in the class loader are allocated.
Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup.
bool _unloading; // true if this class loader goes away
+ bool _keep_alive; // prevents this CLD from being unloaded; set for anonymous class loader data
volatile int _claimed; // true if claimed, for example during GC traces.
// To avoid applying oop closure more than once.
// Has to be an int because we cas it.
@@ -129,8 +133,8 @@
static Metaspace* _ro_metaspace;
static Metaspace* _rw_metaspace;
- bool has_dependency(ClassLoaderData* cld);
- void add_dependency(ClassLoaderData* to_loader_data, TRAPS);
+ void add_dependency(Handle dependency, TRAPS);
+ void locked_add_dependency(objArrayHandle last, objArrayHandle new_dependency);
void set_next(ClassLoaderData* next) { _next = next; }
ClassLoaderData* next() const { return _next; }
@@ -150,7 +154,9 @@
bool claimed() const { return _claimed == 1; }
bool claim();
- void mark_for_unload() { _unloading = true; }
+ void unload();
+ bool keep_alive() const { return _keep_alive; }
+ bool is_alive(BoolObjectClosure* is_alive_closure) const;
void classes_do(void f(InstanceKlass*));
@@ -168,6 +174,8 @@
return _the_null_class_loader_data;
}
+ bool is_anonymous() const;
+
static void init_null_class_loader_data() {
assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
@@ -194,6 +202,9 @@
assert(!(is_the_null_class_loader_data() && _unloading), "The null class loader can never be unloaded");
return _unloading;
}
+ // Anonymous class loader data don't have anything to keep them from being
+ // unloaded while the anonymous class is being parsed.
+ void set_keep_alive(bool value) { _keep_alive = value; }
unsigned int identity_hash() {
return _class_loader == NULL ? 0 : _class_loader->identity_hash();
@@ -211,15 +222,18 @@
void print_value_on(outputStream* out) const PRODUCT_RETURN;
void dump(outputStream * const out) PRODUCT_RETURN;
void verify();
+ const char* loader_name();
jobject add_handle(Handle h);
void add_class(Klass* k);
void remove_class(Klass* k);
void record_dependency(Klass* to, TRAPS);
+ void init_dependencies(TRAPS);
void add_to_deallocate_list(Metadata* m);
static ClassLoaderData* class_loader_data(oop loader);
+ static ClassLoaderData* anonymous_class_loader_data(oop loader, TRAPS);
static void print_loader(ClassLoaderData *loader_data, outputStream *out);
// CDS support
--- a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -33,7 +33,7 @@
}
-inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader) {
+inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAPS) {
assert(loader() != NULL,"Must be a class loader");
// Gets the class loader data out of the java/lang/ClassLoader object, if non-null
// it's already in the loader_data, so no need to add
@@ -42,5 +42,5 @@
if (loader_data_id) {
return loader_data_id;
}
- return ClassLoaderDataGraph::add(loader_data_addr, loader);
+ return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD);
}
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1148,12 +1148,11 @@
int code_length = bytecodes->length();
Method* m = Method::allocate(cp->pool_holder()->class_loader_data(),
- code_length, flags, 0, 0, 0, 0, mt, CHECK_NULL);
+ code_length, flags, 0, 0, 0, 0, 0, mt, CHECK_NULL);
m->set_constants(NULL); // This will get filled in later
m->set_name_index(cp->utf8(name));
m->set_signature_index(cp->utf8(sig));
- m->set_generic_signature_index(0);
#ifdef CC_INTERP
ResultTypeFinder rtf(sig);
m->set_result_index(rtf.type());
--- a/hotspot/src/share/vm/classfile/dictionary.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -580,7 +580,7 @@
// class loader must be present; a null class loader is the
// bootstrap loader
guarantee(loader_data != NULL || DumpSharedSpaces ||
- loader_data->is_the_null_class_loader_data() ||
+ loader_data->class_loader() == NULL ||
loader_data->class_loader()->is_instance(),
"checking type of class_loader");
e->verify();
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -47,20 +47,9 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/preserveException.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java) \
klass::_##name##_offset = JavaClasses::compute_injected_offset(JavaClasses::klass##_##name##_enum);
@@ -2555,8 +2544,8 @@
void java_lang_invoke_MemberName::set_vmtarget(oop mname, Metadata* ref) {
assert(is_instance(mname), "wrong type");
-#ifdef ASSERT
// check the type of the vmtarget
+ oop dependency = NULL;
if (ref != NULL) {
switch (flags(mname) & (MN_IS_METHOD |
MN_IS_CONSTRUCTOR |
@@ -2564,28 +2553,21 @@
case MN_IS_METHOD:
case MN_IS_CONSTRUCTOR:
assert(ref->is_method(), "should be a method");
+ dependency = ((Method*)ref)->method_holder()->java_mirror();
break;
case MN_IS_FIELD:
assert(ref->is_klass(), "should be a class");
+ dependency = ((Klass*)ref)->java_mirror();
break;
default:
ShouldNotReachHere();
}
}
-#endif //ASSERT
mname->address_field_put(_vmtarget_offset, (address)ref);
- oop loader = NULL;
- if (ref != NULL) {
- if (ref->is_klass()) {
- loader = ((Klass*)ref)->class_loader();
- } else if (ref->is_method()) {
- loader = ((Method*)ref)->method_holder()->class_loader();
- } else {
- ShouldNotReachHere();
- }
- }
- // Add a reference to the loader to ensure the metadata is kept alive
- mname->obj_field_put(_vmloader_offset, loader);
+ // Add a reference to the loader (actually the mirror, because anonymous classes
+ // do not have distinct loaders) to ensure the metadata is kept alive.
+ // This mirror may be different from the one in the clazz field.
+ mname->obj_field_put(_vmloader_offset, dependency);
}
intptr_t java_lang_invoke_MemberName::vmindex(oop mname) {
@@ -2750,7 +2732,6 @@
bool java_lang_ClassLoader::offsets_computed = false;
int java_lang_ClassLoader::_loader_data_offset = -1;
-int java_lang_ClassLoader::_dependencies_offset = -1;
int java_lang_ClassLoader::parallelCapable_offset = -1;
ClassLoaderData** java_lang_ClassLoader::loader_data_addr(oop loader) {
@@ -2762,18 +2743,6 @@
return *java_lang_ClassLoader::loader_data_addr(loader);
}
-oop java_lang_ClassLoader::dependencies(oop loader) {
- return loader->obj_field(_dependencies_offset);
-}
-
-HeapWord* java_lang_ClassLoader::dependencies_addr(oop loader) {
- if (UseCompressedOops) {
- return (HeapWord*)loader->obj_field_addr<narrowOop>(_dependencies_offset);
- } else {
- return (HeapWord*)loader->obj_field_addr<oop>(_dependencies_offset);
- }
-}
-
void java_lang_ClassLoader::compute_offsets() {
assert(!offsets_computed, "offsets should be initialized only once");
offsets_computed = true;
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -1125,8 +1125,7 @@
// Interface to java.lang.ClassLoader objects
#define CLASSLOADER_INJECTED_FIELDS(macro) \
- macro(java_lang_ClassLoader, loader_data, intptr_signature, false) \
- macro(java_lang_ClassLoader, dependencies, object_signature, false)
+ macro(java_lang_ClassLoader, loader_data, intptr_signature, false)
class java_lang_ClassLoader : AllStatic {
private:
@@ -1135,7 +1134,6 @@
hc_parent_offset = 0
};
static int _loader_data_offset;
- static int _dependencies_offset;
static bool offsets_computed;
static int parent_offset;
static int parallelCapable_offset;
@@ -1146,9 +1144,6 @@
static ClassLoaderData** loader_data_addr(oop loader);
static ClassLoaderData* loader_data(oop loader);
- static oop dependencies(oop loader);
- static HeapWord* dependencies_addr(oop loader);
-
static oop parent(oop loader);
static bool isAncestor(oop loader, oop cl);
--- a/hotspot/src/share/vm/classfile/loaderConstraints.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -118,7 +118,7 @@
probe->name()->as_C_string());
for (int i = 0; i < probe->num_loaders(); i++) {
tty->print_cr("[ [%d]: %s", i,
- SystemDictionary::loader_name(probe->loader_data(i)));
+ probe->loader_data(i)->loader_name());
}
}
}
@@ -129,7 +129,7 @@
if (TraceLoaderConstraints) {
ResourceMark rm;
tty->print_cr("[Purging loader %s from constraint for name %s",
- SystemDictionary::loader_name(probe->loader_data(n)),
+ probe->loader_data(n)->loader_name(),
probe->name()->as_C_string()
);
}
@@ -145,7 +145,7 @@
tty->print_cr("[New loader list:");
for (int i = 0; i < probe->num_loaders(); i++) {
tty->print_cr("[ [%d]: %s", i,
- SystemDictionary::loader_name(probe->loader_data(i)));
+ probe->loader_data(i)->loader_name());
}
}
@@ -400,7 +400,7 @@
for (int i = 0; i < p1->num_loaders(); i++) {
tty->print_cr("[ [%d]: %s", i,
- SystemDictionary::loader_name(p1->loader_data(i)));
+ p1->loader_data(i)->loader_name());
}
if (p1->klass() == NULL) {
tty->print_cr("[... and setting class object]");
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -106,9 +106,9 @@
}
-ClassLoaderData* SystemDictionary::register_loader(Handle class_loader) {
+ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
- return ClassLoaderDataGraph::find_or_create(class_loader);
+ return ClassLoaderDataGraph::find_or_create(class_loader, CHECK_NULL);
}
// ----------------------------------------------------------------------------
@@ -591,7 +591,7 @@
// UseNewReflection
// Fix for 4474172; see evaluation for more details
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
- ClassLoaderData *loader_data = register_loader(class_loader);
+ ClassLoaderData *loader_data = register_loader(class_loader, CHECK_NULL);
// Do lookup to see if class already exist and the protection domain
// has the right access
@@ -888,7 +888,7 @@
// of the call to resolve_instance_class_or_null().
// See evaluation 6790209 and 4474172 for more details.
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
- ClassLoaderData* loader_data = register_loader(class_loader);
+ ClassLoaderData* loader_data = register_loader(class_loader, CHECK_NULL);
unsigned int d_hash = dictionary()->compute_hash(class_name, loader_data);
int d_index = dictionary()->hash_to_index(d_hash);
@@ -948,6 +948,18 @@
TRAPS) {
TempNewSymbol parsed_name = NULL;
+ ClassLoaderData* loader_data;
+ if (host_klass.not_null()) {
+ // Create a new CLD for the anonymous class; it uses the same class loader
+ // as the host_klass.
+ assert(EnableInvokeDynamic, "");
+ guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+ loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
+ loader_data->record_dependency(host_klass(), CHECK_NULL);
+ } else {
+ loader_data = ClassLoaderData::class_loader_data(class_loader());
+ }
+
// Parse the stream. Note that we do this even though this klass might
// already be present in the SystemDictionary, otherwise we would not
// throw potential ClassFormatErrors.
@@ -959,7 +971,7 @@
// java.lang.Object through resolve_or_fail, not this path.
instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
- class_loader,
+ loader_data,
protection_domain,
host_klass,
cp_patches,
@@ -973,8 +985,6 @@
// Parsed name could be null if we threw an error before we got far
// enough along to parse it -- in that case, there is nothing to clean up.
if (parsed_name != NULL) {
- ClassLoaderData* loader_data = class_loader_data(class_loader);
-
unsigned int p_hash = placeholders()->compute_hash(parsed_name,
loader_data);
int p_index = placeholders()->hash_to_index(p_hash);
@@ -987,9 +997,8 @@
if (host_klass.not_null() && k.not_null()) {
assert(EnableInvokeDynamic, "");
+ k->set_host_klass(host_klass());
// If it's anonymous, initialize it now, since nobody else will.
- k->class_loader_data()->record_dependency(host_klass(), CHECK_NULL);
- k->set_host_klass(host_klass());
{
MutexLocker mu_r(Compile_lock, THREAD);
@@ -1002,11 +1011,11 @@
}
// Rewrite and patch constant pool here.
- k->link_class(THREAD);
+ k->link_class(CHECK_NULL);
if (cp_patches != NULL) {
k->constants()->patch_resolved_references(cp_patches);
}
- k->eager_initialize(THREAD);
+ k->eager_initialize(CHECK_NULL);
// notify jvmti
if (JvmtiExport::should_post_class_load()) {
@@ -1039,7 +1048,7 @@
DoObjectLock = false;
}
- ClassLoaderData* loader_data = register_loader(class_loader);
+ ClassLoaderData* loader_data = register_loader(class_loader, CHECK_NULL);
// Make sure we are synchronized on the class loader before we proceed
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
@@ -1059,7 +1068,7 @@
// java.lang.Object through resolve_or_fail, not this path.
instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
- class_loader,
+ loader_data,
protection_domain,
parsed_name,
verify,
@@ -2343,6 +2352,7 @@
// Helper for unpacking the return value from linkMethod and linkCallSite.
static methodHandle unpack_method_and_appendix(Handle mname,
+ KlassHandle accessing_klass,
objArrayHandle appendix_box,
Handle* appendix_result,
TRAPS) {
@@ -2361,6 +2371,12 @@
#endif //PRODUCT
}
(*appendix_result) = Handle(THREAD, appendix);
+ // The target is stored in the cpCache; if the reference to this
+ // MemberName is dropped we need a way to make sure the
+ // class_loader containing this method is kept alive.
+ // FIXME: the appendix might also preserve this dependency.
+ ClassLoaderData* this_key = InstanceKlass::cast(accessing_klass())->class_loader_data();
+ this_key->record_dependency(m->method_holder(), CHECK_NULL); // Can throw OOM
return methodHandle(THREAD, m);
}
}
@@ -2405,7 +2421,7 @@
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
(*method_type_result) = method_type;
- return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
+ return unpack_method_and_appendix(mname, accessing_klass, appendix_box, appendix_result, THREAD);
}
@@ -2596,7 +2612,7 @@
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
(*method_type_result) = method_type;
- return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
+ return unpack_method_and_appendix(mname, caller, appendix_box, appendix_result, THREAD);
}
// Since the identity hash code for symbols changes when the symbols are
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -471,7 +471,7 @@
static void compute_java_system_loader(TRAPS);
// Register a new class loader
- static ClassLoaderData* register_loader(Handle class_loader);
+ static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
private:
// Mirrors for primitive classes (created eagerly)
static oop check_mirror(oop m) {
@@ -531,7 +531,7 @@
InstanceKlass::cast((loader)->klass())->name()->as_C_string() );
}
static const char* loader_name(ClassLoaderData* loader_data) {
- return (loader_data->is_the_null_class_loader_data() ? "<bootloader>" :
+ return (loader_data->class_loader() == NULL ? "<bootloader>" :
InstanceKlass::cast((loader_data->class_loader())->klass())->name()->as_C_string() );
}
--- a/hotspot/src/share/vm/code/icBuffer.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/code/icBuffer.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
@@ -37,21 +38,6 @@
#include "oops/oop.inline2.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-#endif
DEF_STUB_INTERFACE(ICStub);
--- a/hotspot/src/share/vm/code/relocInfo.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/code/relocInfo.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,32 +23,13 @@
*/
#include "precompiled.hpp"
+#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/copy.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-# include "nativeInst_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-# include "nativeInst_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-# include "nativeInst_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-# include "nativeInst_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-# include "nativeInst_ppc.hpp"
-#endif
const RelocationHolder RelocationHolder::none; // its type is relocInfo::none
--- a/hotspot/src/share/vm/code/vmreg.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/code/vmreg.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,21 +27,8 @@
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
-#ifdef TARGET_ARCH_x86
-# include "register_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "register_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "register_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "register_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "register_ppc.hpp"
-#endif
+#include "asm/register.hpp"
+
#ifdef COMPILER2
#include "opto/adlcVMDeps.hpp"
#include "utilities/ostream.hpp"
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -269,10 +269,12 @@
const char* comment,
bool is_blocking) {
assert(!_lock->is_locked(), "bad locking");
+ InstanceKlass* holder = method->method_holder();
_compile_id = compile_id;
_method = method();
- _method_loader = JNIHandles::make_global(_method->method_holder()->class_loader());
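+ // Hold the method's class alive while the task is queued: use the mirror for
+ // anonymous classes (they share their host's loader), otherwise the loader oop.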
+ _method_holder = JNIHandles::make_global(
+ holder->is_anonymous() ? holder->java_mirror(): holder->class_loader());
_osr_bci = osr_bci;
_is_blocking = is_blocking;
_comp_level = comp_level;
@@ -283,7 +285,7 @@
_code_handle = NULL;
_hot_method = NULL;
- _hot_method_loader = NULL;
+ _hot_method_holder = NULL;
_hot_count = hot_count;
_time_queued = 0; // tidy
_comment = comment;
@@ -295,8 +297,12 @@
_hot_method = _method;
} else {
_hot_method = hot_method();
+ // only add loader or mirror if different from _method_holder
+ InstanceKlass* hot_holder = hot_method->method_holder();
+ _hot_method_holder = JNIHandles::make_global(
+ hot_holder->is_anonymous() ? hot_holder->java_mirror() :
+ hot_holder->class_loader());
}
- _hot_method_loader = JNIHandles::make_global(_hot_method->method_holder()->class_loader());
}
}
@@ -321,8 +327,8 @@
void CompileTask::free() {
set_code(NULL);
assert(!_lock->is_locked(), "Should not be locked when freed");
- JNIHandles::destroy_global(_method_loader);
- JNIHandles::destroy_global(_hot_method_loader);
+ JNIHandles::destroy_global(_method_holder);
+ JNIHandles::destroy_global(_hot_method_holder);
}
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -43,7 +43,7 @@
Monitor* _lock;
uint _compile_id;
Method* _method;
- jobject _method_loader;
+ jobject _method_holder;
int _osr_bci;
bool _is_complete;
bool _is_success;
@@ -56,7 +56,7 @@
// Fields used for logging why the compilation was initiated:
jlong _time_queued; // in units of os::elapsed_counter()
Method* _hot_method; // which method actually triggered this task
- jobject _hot_method_loader;
+ jobject _hot_method_holder;
int _hot_count; // information about its invocation counter
const char* _comment; // more info about the task
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -40,19 +40,8 @@
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
//
// ConcurrentMarkSweepPolicy methods
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,18 +27,7 @@
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
class ConcurrentMarkSweepGeneration;
class CMSCollector;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -65,8 +65,7 @@
declare_toplevel_type(AFLBinaryTreeDictionary*) \
declare_toplevel_type(LinearAllocBlock) \
declare_toplevel_type(FreeBlockDictionary<FreeChunk>) \
- declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>) \
- declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>) \
+ declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>)
#define VM_INT_CONSTANTS_CMS(declare_constant) \
declare_constant(Generation::ConcurrentMarkSweep) \
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -28,20 +28,8 @@
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/workgroup.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
bool consume,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -302,16 +302,28 @@
for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
u_char entry = _array->offset_array(c);
if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
- guarantee(entry > N_words, "Should be in logarithmic region");
+ guarantee(entry > N_words,
+ err_msg("Should be in logarithmic region - "
+ "entry: " UINT32_FORMAT ", "
+ "_array->offset_array(c): " UINT32_FORMAT ", "
+ "N_words: " UINT32_FORMAT,
+ entry, _array->offset_array(c), N_words));
}
size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) {
- guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
+ guarantee(_array->offset_array(landing_card) <= entry,
+ err_msg("Monotonicity - landing_card offset: " UINT32_FORMAT ", "
+ "entry: " UINT32_FORMAT,
+ _array->offset_array(landing_card), entry));
} else {
guarantee(landing_card == start_card - 1, "Tautology");
- guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
+ // Note that N_words is the maximum offset value
+ guarantee(_array->offset_array(landing_card) <= N_words,
+ err_msg("landing card offset: " UINT32_FORMAT ", "
+ "N_words: " UINT32_FORMAT,
+ _array->offset_array(landing_card), N_words));
}
}
}
@@ -536,17 +548,27 @@
// The offset can be 0 if the block starts on a boundary. That
// is checked by an assertion above.
size_t start_index = _array->index_for(blk_start);
- HeapWord* boundary = _array->address_for_index(start_index);
+ HeapWord* boundary = _array->address_for_index(start_index);
assert((_array->offset_array(orig_index) == 0 &&
blk_start == boundary) ||
(_array->offset_array(orig_index) > 0 &&
_array->offset_array(orig_index) <= N_words),
- "offset array should have been set");
+ err_msg("offset array should have been set - "
+ "orig_index offset: " UINT32_FORMAT ", "
+ "blk_start: " PTR_FORMAT ", "
+ "boundary: " PTR_FORMAT,
+ _array->offset_array(orig_index),
+ blk_start, boundary));
for (size_t j = orig_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <=
(u_char) (N_words+BlockOffsetArray::N_powers-1),
- "offset array should have been set");
+ err_msg("offset array should have been set - "
+ UINT32_FORMAT " not > 0 OR "
+ UINT32_FORMAT " not <= " UINT32_FORMAT,
+ _array->offset_array(j),
+ _array->offset_array(j),
+ (u_char) (N_words+BlockOffsetArray::N_powers-1)));
}
#endif
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -78,7 +78,9 @@
virtual void resize(size_t new_word_size) = 0;
virtual void set_bottom(HeapWord* new_bottom) {
- assert(new_bottom <= _end, "new_bottom > _end");
+ assert(new_bottom <= _end,
+ err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
+ new_bottom, _end));
_bottom = new_bottom;
resize(pointer_delta(_end, _bottom));
}
@@ -134,29 +136,42 @@
VirtualSpace _vs;
u_char* _offset_array; // byte array keeping backwards offsets
+ void check_index(size_t index, const char* msg) const {
+ assert(index < _vs.committed_size(),
+ err_msg("%s - "
+ "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
+ msg, index, _vs.committed_size()));
+ }
+
+ void check_offset(size_t offset, const char* msg) const {
+ assert(offset <= N_words,
+ err_msg("%s - "
+ "offset: " UINT32_FORMAT", N_words: " UINT32_FORMAT,
+ msg, offset, N_words));
+ }
+
// Bounds checking accessors:
// For performance these have to devolve to array accesses in product builds.
u_char offset_array(size_t index) const {
- assert(index < _vs.committed_size(), "index out of range");
+ check_index(index, "index out of range");
return _offset_array[index];
}
void set_offset_array(size_t index, u_char offset) {
- assert(index < _vs.committed_size(), "index out of range");
- assert(offset <= N_words, "offset too large");
+ check_index(index, "index out of range");
+ check_offset(offset, "offset too large");
_offset_array[index] = offset;
}
void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
- assert(index < _vs.committed_size(), "index out of range");
+ check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
- assert(pointer_delta(high, low) <= N_words, "offset too large");
+ check_offset(pointer_delta(high, low), "offset too large");
_offset_array[index] = (u_char) pointer_delta(high, low);
}
void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
- assert(index_for(right - 1) < _vs.committed_size(),
- "right address out of range");
+ check_index(index_for(right - 1), "right address out of range");
assert(left < right, "Heap addresses out of order");
size_t num_cards = pointer_delta(right, left) >> LogN_words;
if (UseMemSetInBOT) {
@@ -171,7 +186,7 @@
}
void set_offset_array(size_t left, size_t right, u_char offset) {
- assert(right < _vs.committed_size(), "right address out of range");
+ check_index(right, "right index out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
if (UseMemSetInBOT) {
@@ -186,11 +201,10 @@
}
void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
- assert(index < _vs.committed_size(), "index out of range");
+ check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
- assert(pointer_delta(high, low) <= N_words, "offset too large");
- assert(_offset_array[index] == pointer_delta(high, low),
- "Wrong offset");
+ check_offset(pointer_delta(high, low), "offset too large");
+ assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
}
bool is_card_boundary(HeapWord* p) const;
@@ -481,7 +495,6 @@
blk_start, blk_end);
}
-
public:
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -49,16 +49,17 @@
char* pc = (char*)p;
assert(pc >= (char*)_reserved.start() &&
pc < (char*)_reserved.end(),
- "p not in range.");
+ err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
+ p, (char*)_reserved.start(), (char*)_reserved.end()));
size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
size_t result = delta >> LogN;
- assert(result < _vs.committed_size(), "bad index from address");
+ check_index(result, "bad index from address");
return result;
}
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
- assert(index < _vs.committed_size(), "bad index");
+ check_index(index, "index out of range");
HeapWord* result = _reserved.start() + (index << LogN_words);
assert(result >= _reserved.start() && result < _reserved.end(),
err_msg("bad address from index result " PTR_FORMAT
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -3690,6 +3690,7 @@
g1_policy()->print_heap_transition();
gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
}
+ gclog_or_tty->flush();
}
bool
@@ -4036,10 +4037,11 @@
#endif
gc_epilogue(false);
-
- log_gc_footer(os::elapsedTime() - pause_start_sec);
}
+ // Print the remainder of the GC log output.
+ log_gc_footer(os::elapsedTime() - pause_start_sec);
+
// It is not yet safe to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,19 +27,7 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
int max_covered_regions) :
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -28,18 +28,7 @@
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
_qset(qset), _buf(NULL), _index(0), _active(active),
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -52,14 +52,22 @@
PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
+
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+ CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
if (_java_thread != NULL)
- _java_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
+ _java_thread->oops_do(
+ &mark_and_push_closure,
+ &mark_and_push_from_clds,
+ &mark_and_push_in_blobs);
if (_vm_thread != NULL)
- _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
+ _vm_thread->oops_do(
+ &mark_and_push_closure,
+ &mark_and_push_from_clds,
+ &mark_and_push_in_blobs);
// Do the real work
cm->follow_marking_stacks();
@@ -89,7 +97,8 @@
{
ResourceMark rm;
CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
- Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
+ CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
+ Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
}
break;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -495,8 +495,9 @@
ParallelScavengeHeap::ParStrongRootsScope psrs;
Universe::oops_do(mark_and_push_closure());
JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
+ CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
- Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
+ Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
ObjectSynchronizer::oops_do(mark_and_push_closure());
FlatProfiler::oops_do(mark_and_push_closure());
Management::oops_do(mark_and_push_closure());
@@ -584,7 +585,8 @@
// General strong roots.
Universe::oops_do(adjust_root_pointer_closure());
JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
- Threads::oops_do(adjust_root_pointer_closure(), NULL);
+ CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
+ Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
FlatProfiler::oops_do(adjust_root_pointer_closure());
Management::oops_do(adjust_root_pointer_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -2436,7 +2436,8 @@
// General strong roots.
Universe::oops_do(adjust_root_pointer_closure());
JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
- Threads::oops_do(adjust_root_pointer_closure(), NULL);
+ CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
+ Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
FlatProfiler::oops_do(adjust_root_pointer_closure());
Management::oops_do(adjust_root_pointer_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -65,7 +65,8 @@
case threads:
{
ResourceMark rm;
- Threads::oops_do(&roots_closure, NULL);
+ CLDToOopClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
+ Threads::oops_do(&roots_closure, cld_closure, NULL);
}
break;
@@ -120,13 +121,14 @@
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
PSScavengeRootsClosure roots_closure(pm);
+ CLDToOopClosure* roots_from_clds = NULL; // Not needed. All CLDs are already visited.
CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
if (_java_thread != NULL)
- _java_thread->oops_do(&roots_closure, &roots_in_blobs);
+ _java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
if (_vm_thread != NULL)
- _vm_thread->oops_do(&roots_closure, &roots_in_blobs);
+ _vm_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
// Do the real work
pm->drain_stacks(false);
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -28,19 +28,7 @@
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
-
+#include "runtime/thread.inline.hpp"
MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
_lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -30,19 +30,8 @@
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
+#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef ASSERT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -289,11 +289,6 @@
// (A scavenge is a GC which is not a full GC.)
virtual bool is_scavengable(const void *p) = 0;
- // Returns "TRUE" if "p" is a method oop in the
- // current heap, with high probability. This predicate
- // is not stable, in general.
- bool is_valid_method(Method* p) const;
-
void set_gc_cause(GCCause::Cause v) {
if (UsePerfData) {
_gc_lastcause = _gc_cause;
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,21 +31,9 @@
#include "oops/arrayOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/copy.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// Inline allocation implementations.
@@ -242,36 +230,6 @@
return (oop)obj;
}
-// Returns "TRUE" if "p" is a method oop in the
-// current heap with high probability. NOTE: The main
-// current consumers of this interface are Forte::
-// and ThreadProfiler::. In these cases, the
-// interpreter frame from which "p" came, may be
-// under construction when sampled asynchronously, so
-// the clients want to check that it represents a
-// valid method before using it. Nonetheless since
-// the clients do not typically lock out GC, the
-// predicate is_valid_method() is not stable, so
-// it is possible that by the time "p" is used, it
-// is no longer valid.
-inline bool CollectedHeap::is_valid_method(Method* p) const {
- return
- p != NULL &&
-
- // Check whether "method" is metadata
- p->is_metadata() &&
-
- // See if GC is active; however, there is still an
- // apparently unavoidable window after this call
- // and before the client of this interface uses "p".
- // If the client chooses not to lock out GC, then
- // it's a risk the client must accept.
- !is_gc_active() &&
-
- // Check that p is a Method*.
- p->is_method();
-}
-
inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
NoHeaderExtendedOopClosure no_header_cl(cl);
oop_iterate(&no_header_cl);
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,6 +27,7 @@
#include "code/stubs.hpp"
#include "interpreter/bytecodes.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
@@ -47,18 +48,6 @@
#ifdef TARGET_ARCH_MODEL_ppc
# include "interp_masm_ppc.hpp"
#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// This file contains the platform-independent parts
// of the abstract interpreter and the abstract interpreter generator.
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -423,7 +423,9 @@
static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0
|| code == _fconst_0 || code == _dconst_0); }
static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); }
-
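+ // True only for the invoke bytecodes that take an explicit receiver
+ // (invokevirtual, invokespecial, invokeinterface).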
+ static bool has_receiver (Code code) { assert(is_invoke(code), ""); return code == _invokevirtual ||
+ code == _invokespecial ||
+ code == _invokeinterface; }
static bool has_optional_appendix(Code code) { return code == _invokedynamic || code == _invokehandle; }
static int compute_flags (const char* format, int more_flags = 0); // compute the flags
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,9 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "compiler/disassembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
+#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,19 +31,8 @@
#include "oops/method.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// The InterpreterRuntime is called by the interpreter for everything
// that cannot/should not be dealt with in assembly and needs C support.
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -43,19 +43,8 @@
#include "runtime/handles.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
//------------------------------------------------------------------------------------------------------------------------
// Implementation of FieldAccessInfo
--- a/hotspot/src/share/vm/memory/allocation.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/allocation.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -66,10 +66,17 @@
}
bool MetaspaceObj::is_metadata() const {
- // ClassLoaderDataGraph::contains((address)this); has lock inversion problems
+ // GC Verify checks use this in guarantees.
+ // TODO: either replace them with is_metaspace_object() or remove them.
+ // is_metaspace_object() is slower than this test. This test doesn't
+ // seem very useful for metaspace objects anymore though.
return !Universe::heap()->is_in_reserved(this);
}
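+// Slower but precise check: ask the Metaspace whether it owns this address.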
+bool MetaspaceObj::is_metaspace_object() const {
+ return Metaspace::contains((void*)this);
+}
+
void MetaspaceObj::print_address_on(outputStream* st) const {
st->print(" {"INTPTR_FORMAT"}", this);
}
--- a/hotspot/src/share/vm/memory/allocation.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/allocation.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -245,6 +245,7 @@
class MetaspaceObj {
public:
bool is_metadata() const;
+ bool is_metaspace_object() const; // more specific test but slower
bool is_shared() const;
void print_address_on(outputStream* st) const; // nonvirtual address printing
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -290,7 +290,7 @@
assert(chunk->list() == this, "list should be set for chunk");
assert(tail() != NULL, "The tree list is embedded in the first chunk");
// which means that the list can never be empty.
- assert(!verify_chunk_in_free_list(chunk), "Double entry");
+ assert(!this->verify_chunk_in_free_list(chunk), "Double entry");
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -300,7 +300,7 @@
assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
FreeList_t<Chunk_t>::increment_count();
- debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
+ debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
}
@@ -314,7 +314,7 @@
assert(chunk->list() == this, "list should be set for chunk");
assert(head() != NULL, "The tree list is embedded in the first chunk");
assert(chunk != NULL, "returning NULL chunk");
- assert(!verify_chunk_in_free_list(chunk), "Double entry");
+ assert(!this->verify_chunk_in_free_list(chunk), "Double entry");
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -328,7 +328,7 @@
head()->link_after(chunk);
assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
FreeList_t<Chunk_t>::increment_count();
- debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
+ debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
}
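The only change in these hunks is qualifying inherited members with this->. That is the standard fix for C++ two-phase name lookup: in a class template whose base class depends on a template parameter, unqualified names are not looked up in that base at definition time, so conforming compilers reject the original calls. A minimal, self-contained illustration with hypothetical names:

#include <cstddef>

template <class Chunk>
struct FreeListBase {
  bool verify_chunk_in_free_list(Chunk*) const { return false; }
  void increment_returned_bytes_by(size_t) {}
};

template <class Chunk>
struct TreeListLike : public FreeListBase<Chunk> {
  void return_chunk(Chunk* chunk, size_t bytes) {
    // Without this->, the names are not found: they live in a base class
    // that depends on the template parameter Chunk.
    if (!this->verify_chunk_in_free_list(chunk)) {
      this->increment_returned_bytes_by(bytes);
    }
  }
};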
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -37,19 +37,8 @@
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -38,20 +38,9 @@
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
//
// DefNewGeneration functions.
--- a/hotspot/src/share/vm/memory/filemap.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/filemap.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/symbolTable.hpp"
+#include "classfile/altHashing.hpp"
#include "memory/filemap.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
@@ -82,8 +83,37 @@
close();
}
+// Fill in the fileMapInfo structure with data about this VM instance.
-// Fill in the fileMapInfo structure with data about this VM instance.
+// This method copies the vm version info into header_version. If the version is too
+// long then a truncated version, which has a hash code appended to it, is copied.
+//
+// Using a template enables this method to verify that header_version is an array of
+// length JVM_IDENT_MAX. This ensures that the code that writes to the CDS file and
+// the code that reads the CDS file will both use the same size buffer. Hence, will
+// use identical truncation. This is necessary for matching of truncated versions.
+template <int N> static void get_header_version(char (&header_version) [N]) {
+ assert(N == JVM_IDENT_MAX, "Bad header_version size");
+
+ const char *vm_version = VM_Version::internal_vm_info_string();
+ const int version_len = (int)strlen(vm_version);
+
+ if (version_len < (JVM_IDENT_MAX-1)) {
+ strcpy(header_version, vm_version);
+
+ } else {
+ // Get the hash value. Use a static seed because the hash needs to return the same
+ // value over multiple jvm invocations.
+ unsigned int hash = AltHashing::murmur3_32(8191, (const jbyte*)vm_version, version_len);
+
+ // Truncate the ident, saving room for the 8 hex character hash value.
+ strncpy(header_version, vm_version, JVM_IDENT_MAX-9);
+
+ // Append the hash code as eight hex digits.
+ sprintf(&header_version[JVM_IDENT_MAX-9], "%08x", hash);
+ header_version[JVM_IDENT_MAX-1] = 0; // Null terminate.
+ }
+}
void FileMapInfo::populate_header(size_t alignment) {
_header._magic = 0xf00baba2;
@@ -95,13 +125,7 @@
// invoked with.
// JVM version string ... changes on each build.
- const char *vm_version = VM_Version::internal_vm_info_string();
- if (strlen(vm_version) < (JVM_IDENT_MAX-1)) {
- strcpy(_header._jvm_ident, vm_version);
- } else {
- fail_stop("JVM Ident field for shared archive is too long"
- " - truncated to <%s>", _header._jvm_ident);
- }
+ get_header_version(_header._jvm_ident);
// Build checks on classpath and jar files
_header._num_jars = 0;
@@ -434,8 +458,9 @@
fail_continue("The shared archive file has a bad magic number.");
return false;
}
- if (strncmp(_header._jvm_ident, VM_Version::internal_vm_info_string(),
- JVM_IDENT_MAX-1) != 0) {
+ char header_version[JVM_IDENT_MAX];
+ get_header_version(header_version);
+ if (strncmp(_header._jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
fail_continue("The shared archive file was created by a different"
" version or build of HotSpot.");
return false;
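Because the writer (populate_header) and the reader (the validation just above) both call the same template with a JVM_IDENT_MAX-sized array, an over-long version string is truncated identically on both sides, and the appended 32-bit hash keeps two different long versions from comparing equal after truncation. A standalone sketch of the same scheme, using std::hash in place of AltHashing::murmur3_32 and a made-up buffer size, so it is not byte-compatible with the real archive header:

#include <cstdio>
#include <cstring>
#include <functional>
#include <string>

enum { IDENT_MAX = 64 };  // stand-in for JVM_IDENT_MAX

template <int N>
static void make_ident(const std::string& version, char (&out)[N]) {
  static_assert(N == IDENT_MAX, "writer and reader must use the same buffer size");
  if (version.size() < N - 1) {
    std::strcpy(out, version.c_str());
  } else {
    // Truncate, leaving room for eight hex digits of hash plus the terminator.
    unsigned int hash = (unsigned int)std::hash<std::string>()(version);
    std::strncpy(out, version.c_str(), N - 9);
    std::snprintf(&out[N - 9], 9, "%08x", hash);
  }
}

// Reader side: regenerate the ident the same way and compare, as validate() does.
static bool ident_matches(const char (&stored)[IDENT_MAX], const std::string& version) {
  char expected[IDENT_MAX];
  make_ident(version, expected);
  return std::strncmp(stored, expected, IDENT_MAX - 1) == 0;
}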
--- a/hotspot/src/share/vm/memory/freeBlockDictionary.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/freeBlockDictionary.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,18 +29,7 @@
#include "memory/freeBlockDictionary.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
#ifndef PRODUCT
template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
--- a/hotspot/src/share/vm/memory/gcLocker.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/gcLocker.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,21 +29,18 @@
#include "memory/genCollectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/oop.hpp"
+#include "runtime/thread.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
// The direct lock/unlock calls do not force a collection if an unlock
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -43,21 +43,10 @@
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/synchronizer.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
bool clear_all_softrefs) {
--- a/hotspot/src/share/vm/memory/iterator.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/iterator.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -30,6 +30,10 @@
k->oops_do(_oop_closure);
}
+void CLDToOopClosure::do_cld(ClassLoaderData* cld) {
+ cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
+}
+
void ObjectToOopClosure::do_object(oop obj) {
obj->oop_iterate(_cl);
}
--- a/hotspot/src/share/vm/memory/iterator.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/iterator.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -135,6 +135,20 @@
virtual void do_klass(Klass* k);
};
+class CLDToOopClosure {
+ OopClosure* _oop_closure;
+ KlassToOopClosure _klass_closure;
+ bool _must_claim_cld;
+
+ public:
+ CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
+ _oop_closure(oop_closure),
+ _klass_closure(oop_closure),
+ _must_claim_cld(must_claim_cld) {}
+
+ void do_cld(ClassLoaderData* cld);
+};
+
// ObjectClosure is used for iterating through an object space
class ObjectClosure : public Closure {
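CLDToOopClosure is a small adapter: given an OopClosure, do_cld() applies it to the oops reachable from one ClassLoaderData (via the embedded KlassToOopClosure), optionally claiming the CLD first so parallel workers do not visit it twice. A simplified, single-threaded sketch of that adapter shape with hypothetical stand-in types (the real claiming is done atomically):

#include <cstddef>
#include <vector>

struct OopSlot { void* obj; };

struct OopClosureLike {
  virtual void do_oop(OopSlot* p) = 0;
  virtual ~OopClosureLike() {}
};

// Stand-in for ClassLoaderData: a set of oop slots plus a "claimed" mark.
struct ClassLoaderDataLike {
  std::vector<OopSlot> handles;
  bool claimed;
  ClassLoaderDataLike() : claimed(false) {}
  bool try_claim() { bool already = claimed; claimed = true; return !already; }
  void oops_do(OopClosureLike* cl, bool must_claim) {
    if (must_claim && !try_claim()) return;  // another worker already did this CLD
    for (size_t i = 0; i < handles.size(); i++) cl->do_oop(&handles[i]);
  }
};

// The adapter: turns an oop closure into a per-CLD visitor.
class CLDVisitor {
  OopClosureLike* _oop_closure;
  bool            _must_claim;
 public:
  CLDVisitor(OopClosureLike* oop_closure, bool must_claim = true)
    : _oop_closure(oop_closure), _must_claim(must_claim) {}
  void do_cld(ClassLoaderDataLike* cld) { cld->oops_do(_oop_closure, _must_claim); }
};

The sharedHeap.cpp hunk further below shows the intended use: the adapter is only passed to the thread walk when not scavenging, so young collections skip the CLD oops.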
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/metablock.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "memory/metablock.hpp"
+#include "utilities/copy.hpp"
+#include "utilities/debug.hpp"
+
+// Blocks of space for metadata are allocated out of Metachunks.
+//
+// Metachunks are allocated out of MetadataVirtualspaces and, once
+// allocated, there is no explicit link between a Metachunk and
+// the MetadataVirtualspace from which it was allocated.
+//
+// Each SpaceManager maintains a
+// list of the chunks it is using and the current chunk. The current
+// chunk is the chunk from which allocations are done. Space freed in
+// a chunk is placed on the free list of blocks (BlockFreelist) and
+// reused from there.
+//
+// Future modification
+//
+// The Metachunk can conceivably be replaced by the Chunk in
+// allocation.hpp. Note that the latter Chunk is the space for
+// allocation (allocations from the chunk are out of the space in
+// the Chunk after the header for the Chunk) whereas Metachunks
+// point to space in a VirtualSpace. To replace Metachunks with
+// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
+size_t Metablock::_min_block_byte_size = sizeof(Metablock);
+
+#ifdef ASSERT
+size_t Metablock::_overhead =
+ Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
+#else
+size_t Metablock::_overhead = 0;
+#endif
+
+// New blocks returned by the Metaspace are zero initialized.
+// We should instead fix the constructors so they do not assume this.
+Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
+ if (p == NULL) {
+ return NULL;
+ }
+
+ Metablock* result = (Metablock*) p;
+
+ // Clear the memory
+ Copy::fill_to_aligned_words((HeapWord*)result, word_size);
+#ifdef ASSERT
+ result->set_word_size(word_size);
+#endif
+ return result;
+}
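As the comment above says, callers currently rely on metadata blocks coming back zeroed, so initialize() clears the whole block before handing it out (and, in debug builds, records its size). A standalone sketch of that shape with hypothetical names:

#include <cstddef>
#include <cstring>

typedef unsigned long WordLike;  // stand-in for MetaWord

struct BlockLike {
  size_t _word_size;  // the real Metablock only keeps this under ASSERT

  static BlockLike* initialize(WordLike* p, size_t word_size) {
    if (p == NULL) {
      return NULL;
    }
    BlockLike* result = reinterpret_cast<BlockLike*>(p);
    // Zero the whole block so constructors that assume zeroed memory still work.
    std::memset(p, 0, word_size * sizeof(WordLike));
    result->_word_size = word_size;  // debug-only bookkeeping in the real code
    return result;
  }
};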
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/metachunk.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "memory/metachunk.hpp"
+#include "utilities/copy.hpp"
+#include "utilities/debug.hpp"
+
+//
+// Future modification
+//
+// The Metachunk can conceivably be replaced by the Chunk in
+// allocation.hpp. Note that the latter Chunk is the space for
+// allocation (allocations from the chunk are out of the space in
+// the Chunk after the header for the Chunk) whereas Metachunks
+// point to space in a VirtualSpace. To replace Metachunks with
+// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
+
+const size_t metadata_chunk_initialize = 0xf7f7f7f7;
+
+size_t Metachunk::_overhead =
+ Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
+
+// Metachunk methods
+
+Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
+ // Set bottom, top, and end. Allow space for the Metachunk itself
+ Metachunk* chunk = (Metachunk*) ptr;
+
+ MetaWord* chunk_bottom = ptr + _overhead;
+ chunk->set_bottom(ptr);
+ chunk->set_top(chunk_bottom);
+ MetaWord* chunk_end = ptr + word_size;
+ assert(chunk_end > chunk_bottom, "Chunk is too small");
+ chunk->set_end(chunk_end);
+ chunk->set_next(NULL);
+ chunk->set_word_size(word_size);
+#ifdef ASSERT
+ size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
+ Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
+#endif
+ return chunk;
+}
+
+
+MetaWord* Metachunk::allocate(size_t word_size) {
+ MetaWord* result = NULL;
+ // If available, bump the pointer to allocate.
+ if (free_word_size() >= word_size) {
+ result = _top;
+ _top = _top + word_size;
+ }
+ return result;
+}
+
+// _bottom points to the start of the chunk including the overhead.
+size_t Metachunk::used_word_size() {
+ return pointer_delta(_top, _bottom, sizeof(MetaWord));
+}
+
+size_t Metachunk::free_word_size() {
+ return pointer_delta(_end, _top, sizeof(MetaWord));
+}
+
+size_t Metachunk::capacity_word_size() {
+ return pointer_delta(_end, _bottom, sizeof(MetaWord));
+}
+
+void Metachunk::print_on(outputStream* st) const {
+ st->print_cr("Metachunk:"
+ " bottom " PTR_FORMAT " top " PTR_FORMAT
+ " end " PTR_FORMAT " size " SIZE_FORMAT,
+ bottom(), top(), end(), word_size());
+}
+
+#ifndef PRODUCT
+void Metachunk::mangle() {
+ // Mangle the payload of the chunk and not the links that
+ // maintain list of chunks.
+ HeapWord* start = (HeapWord*)(bottom() + overhead());
+ size_t word_size = capacity_word_size() - overhead();
+ Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
+}
+#endif // PRODUCT
+
+void Metachunk::verify() {
+#ifdef ASSERT
+ // Cannot walk through the blocks unless the blocks have
+ // headers with sizes.
+ assert(_bottom <= _top &&
+ _top <= _end,
+ "Chunk has been smashed");
+#endif
+ return;
+}
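Metachunk::allocate() above is a plain bump-pointer allocator: the chunk keeps bottom, top and end pointers, an allocation simply advances top when there is room, and the used/free/capacity sizes fall out as pointer differences (the real chunk also reserves _overhead words at the front for its own header). A self-contained sketch of the same mechanism with hypothetical names:

#include <cstddef>
#include <cstdint>

typedef uintptr_t Word;

class BumpChunk {
  Word* _bottom;  // start of the usable area
  Word* _top;     // next free word
  Word* _end;     // one past the last usable word
 public:
  BumpChunk(Word* bottom, size_t word_size)
    : _bottom(bottom), _top(bottom), _end(bottom + word_size) {}

  // Bump-pointer allocation: return NULL when the request does not fit.
  Word* allocate(size_t word_size) {
    if (free_word_size() < word_size) {
      return NULL;
    }
    Word* result = _top;
    _top += word_size;
    return result;
  }

  size_t used_word_size() const     { return (size_t)(_top - _bottom); }
  size_t free_word_size() const     { return (size_t)(_end - _top); }
  size_t capacity_word_size() const { return (size_t)(_end - _bottom); }
};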
--- a/hotspot/src/share/vm/memory/metachunk.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/metachunk.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -123,9 +123,7 @@
void assert_is_mangled() const {/* Don't check "\*/}
-#ifdef ASSERT
- void mangle();
-#endif // ASSERT
+ NOT_PRODUCT(void mangle();)
void print_on(outputStream* st) const;
void verify();
--- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -36,6 +36,7 @@
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
@@ -51,7 +52,6 @@
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 64 * K;
-const size_t metadata_chunk_initialize = 0xf7f7f7f7;
const size_t metadata_deallocate = 0xf5f5f5f5;
MetaWord* last_allocated = 0;
@@ -91,23 +91,6 @@
// chunk is the chunk from which allocations are done. Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
-//
-// Future modification
-//
-// The Metachunk can conceivable be replaced by the Chunk in
-// allocation.hpp. Note that the latter Chunk is the space for
-// allocation (allocations from the chunk are out of the space in
-// the Chunk after the header for the Chunk) where as Metachunks
-// point to space in a VirtualSpace. To replace Metachunks with
-// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
-size_t Metablock::_min_block_byte_size = sizeof(Metablock);
-#ifdef ASSERT
- size_t Metablock::_overhead =
- Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
-#else
- size_t Metablock::_overhead = 0;
-#endif
-
// Pointer to list of Metachunks.
class ChunkList VALUE_OBJ_CLASS_SPEC {
@@ -325,10 +308,12 @@
bool expand_by(size_t words, bool pre_touch = false);
bool shrink_by(size_t words);
+#ifdef ASSERT
// Debug support
static void verify_virtual_space_total();
static void verify_virtual_space_count();
void mangle();
+#endif
void print_on(outputStream* st) const;
};
@@ -621,16 +606,15 @@
void locked_print_chunks_in_use_on(outputStream* st) const;
void verify();
+ void verify_chunk_size(Metachunk* chunk);
+ NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
- void mangle_freed_chunks();
void verify_allocation_total();
#endif
};
uint const SpaceManager::_small_chunk_limit = 4;
-
-
const char* SpaceManager::_expand_lock_name =
"SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
@@ -639,103 +623,6 @@
SpaceManager::_expand_lock_name,
Mutex::_allow_vm_block_flag);
-size_t Metachunk::_overhead =
- Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
-
-// New blocks returned by the Metaspace are zero initialized.
-// We should fix the constructors to not assume this instead.
-Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
- if (p == NULL) {
- return NULL;
- }
-
- Metablock* result = (Metablock*) p;
-
- // Clear the memory
- Copy::fill_to_aligned_words((HeapWord*)result, word_size);
-#ifdef ASSERT
- result->set_word_size(word_size);
-#endif
- return result;
-}
-
-// Metachunk methods
-
-Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
- // Set bottom, top, and end. Allow space for the Metachunk itself
- Metachunk* chunk = (Metachunk*) ptr;
-
- MetaWord* chunk_bottom = ptr + _overhead;
- chunk->set_bottom(ptr);
- chunk->set_top(chunk_bottom);
- MetaWord* chunk_end = ptr + word_size;
- assert(chunk_end > chunk_bottom, "Chunk must be too small");
- chunk->set_end(chunk_end);
- chunk->set_next(NULL);
- chunk->set_word_size(word_size);
-#ifdef ASSERT
- size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
- Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
-#endif
- return chunk;
-}
-
-
-MetaWord* Metachunk::allocate(size_t word_size) {
- MetaWord* result = NULL;
- // If available, bump the pointer to allocate.
- if (free_word_size() >= word_size) {
- result = _top;
- _top = _top + word_size;
- }
- return result;
-}
-
-// _bottom points to the start of the chunk including the overhead.
-size_t Metachunk::used_word_size() {
- return pointer_delta(_top, _bottom, sizeof(MetaWord));
-}
-
-size_t Metachunk::free_word_size() {
- return pointer_delta(_end, _top, sizeof(MetaWord));
-}
-
-size_t Metachunk::capacity_word_size() {
- return pointer_delta(_end, _bottom, sizeof(MetaWord));
-}
-
-void Metachunk::print_on(outputStream* st) const {
- st->print_cr("Metachunk:"
- " bottom " PTR_FORMAT " top " PTR_FORMAT
- " end " PTR_FORMAT " size " SIZE_FORMAT,
- bottom(), top(), end(), word_size());
-}
-
-#ifdef ASSERT
-void Metachunk::mangle() {
- // Mangle the payload of the chunk and not the links that
- // maintain list of chunks.
- HeapWord* start = (HeapWord*)(bottom() + overhead());
- size_t word_size = capacity_word_size() - overhead();
- Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
-}
-#endif // ASSERT
-
-void Metachunk::verify() {
-#ifdef ASSERT
- // Cannot walk through the blocks unless the blocks have
- // headers with sizes.
- assert(_bottom <= _top &&
- _top <= _end,
- "Chunk has been smashed");
- assert(SpaceManager::is_humongous(_word_size) ||
- _word_size == SpaceManager::MediumChunk ||
- _word_size == SpaceManager::SmallChunk,
- "Chunk size is wrong");
-#endif
- return;
-}
-
// BlockFreelist methods
BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
@@ -917,10 +804,12 @@
vs->high_boundary());
}
+#ifdef ASSERT
void VirtualSpaceNode::mangle() {
size_t word_size = capacity_words_in_vs();
Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
+#endif // ASSERT
// VirtualSpaceList methods
// Space allocated from the VirtualSpace
@@ -1007,6 +896,8 @@
delete new_entry;
return false;
} else {
+ // ensure lock-free iteration sees fully initialized node
+ OrderAccess::storestore();
link_vs(new_entry, vs_word_size);
return true;
}
@@ -1096,7 +987,6 @@
}
}
-#ifndef PRODUCT
bool VirtualSpaceList::contains(const void *ptr) {
VirtualSpaceNode* list = virtual_space_list();
VirtualSpaceListIterator iter(list);
@@ -1108,7 +998,6 @@
}
return false;
}
-#endif // PRODUCT
// MetaspaceGC methods
@@ -1985,16 +1874,14 @@
locked_print_chunks_in_use_on(gclog_or_tty);
}
+ // Mangle freed memory.
+ NOT_PRODUCT(mangle_freed_chunks();)
+
// Have to update before the chunks_in_use lists are emptied
// below.
chunk_manager->inc_free_chunks_total(sum_capacity_in_chunks_in_use(),
sum_count_in_chunks_in_use());
-#ifdef ASSERT
- // Mangle freed memory.
- mangle_freed_chunks();
-#endif // ASSERT
-
// Add all the chunks in use by this space manager
// to the global list of free chunks.
@@ -2212,12 +2099,21 @@
Metachunk* curr = chunks_in_use(i);
while (curr != NULL) {
curr->verify();
+ verify_chunk_size(curr);
curr = curr->next();
}
}
}
}
+void SpaceManager::verify_chunk_size(Metachunk* chunk) {
+ assert(is_humongous(chunk->word_size()) ||
+ chunk->word_size() == MediumChunk ||
+ chunk->word_size() == SmallChunk,
+ "Chunk size is wrong");
+ return;
+}
+
#ifdef ASSERT
void SpaceManager::verify_allocation_total() {
#if 0
@@ -2273,7 +2169,7 @@
" waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
}
-#ifdef ASSERT
+#ifndef PRODUCT
void SpaceManager::mangle_freed_chunks() {
for (ChunkIndex index = SmallIndex;
index < NumberOfInUseLists;
@@ -2291,11 +2187,16 @@
}
}
}
-#endif // ASSERT
+#endif // PRODUCT
// MetaspaceAux
+size_t MetaspaceAux::used_in_bytes() {
+ return (Metaspace::class_space_list()->used_words_sum() +
+ Metaspace::space_list()->used_words_sum()) * BytesPerWord;
+}
+
size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
size_t used = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
@@ -2324,6 +2225,11 @@
// The total words available for metadata allocation. This
// uses Metaspace capacity_words() which is the total words
// in chunks allocated for a Metaspace.
+size_t MetaspaceAux::capacity_in_bytes() {
+ return (Metaspace::class_space_list()->capacity_words_sum() +
+ Metaspace::space_list()->capacity_words_sum()) * BytesPerWord;
+}
+
size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
size_t capacity = free_chunks_total(mdtype);
ClassLoaderDataGraphMetaspaceIterator iter;
@@ -2336,6 +2242,11 @@
return capacity * BytesPerWord;
}
+size_t MetaspaceAux::reserved_in_bytes() {
+ return (Metaspace::class_space_list()->virtual_space_total() +
+ Metaspace::space_list()->virtual_space_total()) * BytesPerWord;
+}
+
size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
size_t reserved = (mdtype == Metaspace::ClassType) ?
Metaspace::class_space_list()->virtual_space_total() :
@@ -2739,15 +2650,17 @@
}
}
-#ifndef PRODUCT
-bool Metaspace::contains(const void * ptr) const {
+bool Metaspace::contains(const void * ptr) {
if (MetaspaceShared::is_in_shared_space(ptr)) {
return true;
}
- MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+ // This is checked while unlocked. As long as the virtualspaces are added
+ // at the end, the pointer will be in one of them. The virtual spaces
+ // aren't deleted presently. When they are, some sort of locking might
+ // be needed. Note that locking here can cause lock-inversion problems with
+ // the caller, MetaspaceObj::is_metadata().
return space_list()->contains(ptr) || class_space_list()->contains(ptr);
}
-#endif
void Metaspace::verify() {
vsm()->verify();
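The unlocked Metaspace::contains() only works because of the ordering added above: a VirtualSpaceNode is fully initialized, a storestore barrier is issued, and only then is the node linked into the list, and nodes are appended at the end and never removed. A standalone sketch of that publish pattern using C++11 atomics in place of OrderAccess (single writer serialized by its own lock, lock-free readers):

#include <atomic>
#include <cstddef>
#include <cstdint>

struct RegionNode {
  uintptr_t lo;
  uintptr_t hi;
  std::atomic<RegionNode*> next;
  RegionNode(uintptr_t l, uintptr_t h) : lo(l), hi(h), next(NULL) {}
  bool contains(const void* p) const {
    uintptr_t a = (uintptr_t)p;
    return a >= lo && a < hi;
  }
};

// Writer (already serialized by its own expand lock): the release store makes
// the node's fields visible before the link is, like storestore() before link_vs().
static void append(RegionNode* tail, RegionNode* fully_initialized_node) {
  tail->next.store(fully_initialized_node, std::memory_order_release);
}

// Reader: walks the list with no lock. Safe only because nodes are appended
// at the end and never removed, as the contains() comment above spells out.
static bool list_contains(const RegionNode* head, const void* p) {
  for (const RegionNode* n = head; n != NULL;
       n = n->next.load(std::memory_order_acquire)) {
    if (n->contains(p)) {
      return true;
    }
  }
  return false;
}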
--- a/hotspot/src/share/vm/memory/metaspace.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -135,11 +135,7 @@
static bool is_initialized() { return _class_space_list != NULL; }
-#ifndef PRODUCT
- bool contains(const void *ptr) const;
- bool contains_class(const void *ptr) const;
-#endif
-
+ static bool contains(const void *ptr);
void dump(outputStream* const out) const;
void print_on(outputStream* st) const;
@@ -160,25 +156,16 @@
public:
// Total of space allocated to metadata in all Metaspaces
- static size_t used_in_bytes() {
- return used_in_bytes(Metaspace::ClassType) +
- used_in_bytes(Metaspace::NonClassType);
- }
+ static size_t used_in_bytes();
// Total of available space in all Metaspaces
// Total of capacity allocated to all Metaspaces. This includes
// space in Metachunks not yet allocated and in the Metachunk
// freelist.
- static size_t capacity_in_bytes() {
- return capacity_in_bytes(Metaspace::ClassType) +
- capacity_in_bytes(Metaspace::NonClassType);
- }
+ static size_t capacity_in_bytes();
// Total space reserved in all Metaspaces
- static size_t reserved_in_bytes() {
- return reserved_in_bytes(Metaspace::ClassType) +
- reserved_in_bytes(Metaspace::NonClassType);
- }
+ static size_t reserved_in_bytes();
static size_t min_chunk_size();
--- a/hotspot/src/share/vm/memory/resourceArea.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/resourceArea.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
//------------------------------ResourceMark-----------------------------------
debug_only(int ResourceArea::_warned;) // to suppress multiple warnings
--- a/hotspot/src/share/vm/memory/resourceArea.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/resourceArea.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,7 @@
#define SHARE_VM_MEMORY_RESOURCEAREA_HPP
#include "memory/allocation.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// The resource area holds temporary data structures in the VM.
// The actual allocation areas are thread local. Typical usage:
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -154,10 +154,12 @@
if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
JNIHandles::oops_do(roots);
// All threads execute this; the individual threads are task groups.
+ CLDToOopClosure roots_from_clds(roots);
+ CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
if (ParallelGCThreads > 0) {
- Threads::possibly_parallel_oops_do(roots, code_roots);
+ Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
} else {
- Threads::oops_do(roots, code_roots);
+ Threads::oops_do(roots, roots_from_clds_p, code_roots);
}
if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
ObjectSynchronizer::oops_do(roots);
--- a/hotspot/src/share/vm/memory/space.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/space.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -790,7 +790,9 @@
// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
- assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+ assert(MemRegion(bottom(), end()).contains(p),
+ err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
+ p, bottom(), end()));
if (p >= top()) {
return top();
} else {
@@ -800,19 +802,27 @@
last = cur;
cur += oop(cur)->size();
}
- assert(oop(last)->is_oop(), "Should be an object start");
+ assert(oop(last)->is_oop(),
+ err_msg(PTR_FORMAT " should be an object start", last));
return last;
}
}
size_t ContiguousSpace::block_size(const HeapWord* p) const {
- assert(MemRegion(bottom(), end()).contains(p), "p not in space");
+ assert(MemRegion(bottom(), end()).contains(p),
+ err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
+ p, bottom(), end()));
HeapWord* current_top = top();
- assert(p <= current_top, "p is not a block start");
- assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
- if (p < current_top)
+ assert(p <= current_top,
+ err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
+ p, current_top));
+ assert(p == current_top || oop(p)->is_oop(),
+ err_msg("p (" PTR_FORMAT ") is not a block start - "
+ "current_top: " PTR_FORMAT ", is_oop: %s",
+ p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
+ if (p < current_top) {
return oop(p)->size();
- else {
+ } else {
assert(p == current_top, "just checking");
return pointer_delta(end(), (HeapWord*) p);
}
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -28,19 +28,8 @@
#include "memory/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// Thread-Local Edens support
--- a/hotspot/src/share/vm/memory/universe.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/memory/universe.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -62,6 +62,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
@@ -69,18 +70,6 @@
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/preserveException.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
@@ -418,6 +407,10 @@
assert(i == _fullgc_alot_dummy_array->length(), "just checking");
}
#endif
+
+ // Initialize dependency array for null class loader
+ ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
+
}
// CDS support for patching vtables in metadata in the shared archive.
@@ -425,14 +418,10 @@
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
-static inline void* dereference(void* addr) {
- return *(void**)addr;
-}
-
static inline void add_vtable(void** list, int* n, void* o, int count) {
guarantee((*n) < count, "vtable list too small");
- void* vtable = dereference(o);
- assert(dereference(vtable) != NULL, "invalid vtable");
+ void* vtable = dereference_vptr(o);
+ assert(*(void**)(vtable) != NULL, "invalid vtable");
list[(*n)++] = vtable;
}
--- a/hotspot/src/share/vm/oops/compiledICHolder.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/compiledICHolder.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -48,8 +48,8 @@
// Verification
void CompiledICHolder::verify_on(outputStream* st) {
- guarantee(holder_method()->is_metadata(), "should be in permspace");
+ guarantee(holder_method()->is_metadata(), "should be in metaspace");
guarantee(holder_method()->is_method(), "should be method");
- guarantee(holder_klass()->is_metadata(), "should be in permspace");
+ guarantee(holder_klass()->is_metadata(), "should be in metaspace");
guarantee(holder_klass()->is_klass(), "should be klass");
}
--- a/hotspot/src/share/vm/oops/constMethod.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/constMethod.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -39,16 +39,19 @@
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
+ u2 generic_signature_index,
MethodType method_type,
TRAPS) {
int size = ConstMethod::size(byte_code_size,
compressed_line_number_size,
localvariable_table_length,
exception_table_length,
- checked_exceptions_length);
+ checked_exceptions_length,
+ generic_signature_index);
return new (loader_data, size, true, THREAD) ConstMethod(
byte_code_size, compressed_line_number_size, localvariable_table_length,
- exception_table_length, checked_exceptions_length, method_type, size);
+ exception_table_length, checked_exceptions_length, generic_signature_index,
+ method_type, size);
}
ConstMethod::ConstMethod(int byte_code_size,
@@ -56,6 +59,7 @@
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
+ u2 generic_signature_index,
MethodType method_type,
int size) {
@@ -66,7 +70,8 @@
set_stackmap_data(NULL);
set_code_size(byte_code_size);
set_constMethod_size(size);
- set_inlined_tables_length(checked_exceptions_length,
+ set_inlined_tables_length(generic_signature_index,
+ checked_exceptions_length,
compressed_line_number_size,
localvariable_table_length,
exception_table_length);
@@ -90,7 +95,8 @@
int compressed_line_number_size,
int local_variable_table_length,
int exception_table_length,
- int checked_exceptions_length) {
+ int checked_exceptions_length,
+ u2 generic_signature_index) {
int extra_bytes = code_size;
if (compressed_line_number_size > 0) {
extra_bytes += compressed_line_number_size;
@@ -108,6 +114,9 @@
extra_bytes += sizeof(u2);
extra_bytes += exception_table_length * sizeof(ExceptionTableElement);
}
+ if (generic_signature_index != 0) {
+ extra_bytes += sizeof(u2);
+ }
int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
return align_object_size(header_size() + extra_words);
}
@@ -125,10 +134,17 @@
return code_end();
}
-u2* ConstMethod::checked_exceptions_length_addr() const {
+u2* ConstMethod::generic_signature_index_addr() const {
// Located at the end of the constMethod.
+ assert(has_generic_signature(), "called only if generic signature exists");
+ return last_u2_element();
+}
+
+u2* ConstMethod::checked_exceptions_length_addr() const {
+ // Located immediately before the generic signature index.
assert(has_checked_exceptions(), "called only if table is present");
- return last_u2_element();
+ return has_generic_signature() ? (last_u2_element() - 1) :
+ last_u2_element();
}
u2* ConstMethod::exception_table_length_addr() const {
@@ -137,8 +153,10 @@
// If checked_exception present, locate immediately before them.
return (u2*) checked_exceptions_start() - 1;
} else {
- // Else, the exception table is at the end of the constMethod.
- return last_u2_element();
+ // Else, the exception table is at the end of the constMethod or
+ // immediately before the generic signature index.
+ return has_generic_signature() ? (last_u2_element() - 1) :
+ last_u2_element();
}
}
@@ -152,25 +170,30 @@
// If checked_exception present, locate immediately before them.
return (u2*) checked_exceptions_start() - 1;
} else {
- // Else, the linenumber table is at the end of the constMethod.
- return last_u2_element();
+ // Else, the linenumber table is at the end of the constMethod or
+ // immediately before the generic signature index.
+ return has_generic_signature() ? (last_u2_element() - 1) :
+ last_u2_element();
}
}
}
-
// Update the flags to indicate the presence of these optional fields.
-void ConstMethod::set_inlined_tables_length(
- int checked_exceptions_len,
- int compressed_line_number_size,
- int localvariable_table_len,
- int exception_table_len) {
+void ConstMethod::set_inlined_tables_length(u2 generic_signature_index,
+ int checked_exceptions_len,
+ int compressed_line_number_size,
+ int localvariable_table_len,
+ int exception_table_len) {
// Must be done in the order below, otherwise length_addr accessors
// will not work. Only set bit in header if length is positive.
assert(_flags == 0, "Error");
if (compressed_line_number_size > 0) {
_flags |= _has_linenumber_table;
}
+ if (generic_signature_index != 0) {
+ _flags |= _has_generic_signature;
+ *(generic_signature_index_addr()) = generic_signature_index;
+ }
if (checked_exceptions_len > 0) {
_flags |= _has_checked_exceptions;
*(checked_exceptions_length_addr()) = checked_exceptions_len;
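The accessors above all follow one rule: the optional tables are packed at the tail of the ConstMethod, so each *_addr() starts from last_u2_element() and steps back over whichever later fields the flag bits say are present (here, only the generic signature index sits after everything else). A standalone sketch of that "optional trailing fields, addressed from the end" layout with two optional u2 fields (names and flags are hypothetical):

#include <cassert>
#include <cstdint>

typedef uint16_t u2_t;

enum {
  HAS_CHECKED_EXCEPTIONS = 1,
  HAS_GENERIC_SIGNATURE  = 2
};

struct PackedTail {
  uint16_t flags;
  u2_t*    end;  // one past the last u2 slot of the object

  u2_t* last_u2() const { return end - 1; }

  // When present, the generic signature index is always the very last u2.
  u2_t* generic_signature_index_addr() const {
    assert((flags & HAS_GENERIC_SIGNATURE) != 0);
    return last_u2();
  }

  // The checked-exceptions length sits just before it, or is itself last
  // when no generic signature index was stored.
  u2_t* checked_exceptions_length_addr() const {
    assert((flags & HAS_CHECKED_EXCEPTIONS) != 0);
    return (flags & HAS_GENERIC_SIGNATURE) ? last_u2() - 1 : last_u2();
  }
};

Writers must therefore fill in the later fields first (the generic signature index before the checked-exceptions length), which is why set_inlined_tables_length() insists on its ordering.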
--- a/hotspot/src/share/vm/oops/constMethod.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/constMethod.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -45,7 +45,7 @@
// | constMethod_size |
// | interp_kind | flags | code_size |
// | name index | signature index |
-// | method_idnum | generic_signature_index |
+// | method_idnum | max_stack |
// |------------------------------------------------------|
// | |
// | byte codes |
@@ -55,26 +55,29 @@
// | (see class CompressedLineNumberReadStream) |
// | (note that length is unknown until decompressed) |
// | (access flags bit tells whether table is present) |
-// | (indexed from start of ConstMethod*) |
+// | (indexed from start of ConstMethod*) |
// | (elements not necessarily sorted!) |
// |------------------------------------------------------|
// | localvariable table elements + length (length last) |
// | (length is u2, elements are 6-tuples of u2) |
// | (see class LocalVariableTableElement) |
// | (access flags bit tells whether table is present) |
-// | (indexed from end of ConstMethod*) |
+// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
// | exception table + length (length last) |
// | (length is u2, elements are 4-tuples of u2) |
// | (see class ExceptionTableElement) |
// | (access flags bit tells whether table is present) |
-// | (indexed from end of ConstMethod*) |
+// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
// | checked exceptions elements + length (length last) |
// | (length is u2, elements are u2) |
// | (see class CheckedExceptionElement) |
// | (access flags bit tells whether table is present) |
-// | (indexed from end of ConstMethod*) |
+// | (indexed from end of ConstMethod*) |
+// |------------------------------------------------------|
+// | generic signature index (u2) |
+// | (indexed from end of ConstMethod*) |
// |------------------------------------------------------|
@@ -118,7 +121,8 @@
_has_checked_exceptions = 2,
_has_localvariable_table = 4,
_has_exception_table = 8,
- _is_overpass = 16
+ _has_generic_signature = 16,
+ _is_overpass = 32
};
// Bit vector of signature
@@ -145,7 +149,7 @@
u2 _method_idnum; // unique identification number for the method within the class
// initially corresponds to the index into the methods array.
// but this may change with redefinition
- u2 _generic_signature_index; // Generic signature (index in constant pool, 0 if absent)
+ u2 _max_stack; // Maximum number of entries on the expression stack
// Constructor
@@ -154,6 +158,7 @@
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
+ u2 generic_signature_index,
MethodType is_overpass,
int size);
public:
@@ -164,17 +169,22 @@
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
+ u2 generic_signature_index,
MethodType mt,
TRAPS);
bool is_constMethod() const { return true; }
// Inlined tables
- void set_inlined_tables_length(int checked_exceptions_len,
+ void set_inlined_tables_length(u2 generic_signature_index,
+ int checked_exceptions_len,
int compressed_line_number_size,
int localvariable_table_len,
int exception_table_len);
+ bool has_generic_signature() const
+ { return (_flags & _has_generic_signature) != 0; }
+
bool has_linenumber_table() const
{ return (_flags & _has_linenumber_table) != 0; }
@@ -252,8 +262,18 @@
void set_signature_index(int index) { _signature_index = index; }
// generics support
- int generic_signature_index() const { return _generic_signature_index; }
- void set_generic_signature_index(int index) { _generic_signature_index = index; }
+ int generic_signature_index() const {
+ if (has_generic_signature()) {
+ return *generic_signature_index_addr();
+ } else {
+ return 0;
+ }
+ }
+ void set_generic_signature_index(u2 index) {
+ assert(has_generic_signature(), "");
+ u2* addr = generic_signature_index_addr();
+ *addr = index;
+ }
// Sizing
static int header_size() {
@@ -264,7 +284,8 @@
static int size(int code_size, int compressed_line_number_size,
int local_variable_table_length,
int exception_table_length,
- int checked_exceptions_length);
+ int checked_exceptions_length,
+ u2 generic_signature_index);
int size() const { return _constMethod_size;}
void set_constMethod_size(int size) { _constMethod_size = size; }
@@ -281,6 +302,7 @@
// linenumber table - note that length is unknown until decompression,
// see class CompressedLineNumberReadStream.
u_char* compressed_linenumber_table() const; // not preserved by gc
+ u2* generic_signature_index_addr() const;
u2* checked_exceptions_length_addr() const;
u2* localvariable_table_length_addr() const;
u2* exception_table_length_addr() const;
@@ -314,12 +336,19 @@
static ByteSize constants_offset()
{ return byte_offset_of(ConstMethod, _constants); }
+ static ByteSize max_stack_offset()
+ { return byte_offset_of(ConstMethod, _max_stack); }
+
// Unique id for the method
static const u2 MAX_IDNUM;
static const u2 UNSET_IDNUM;
u2 method_idnum() const { return _method_idnum; }
void set_method_idnum(u2 idnum) { _method_idnum = idnum; }
+ // max stack
+ int max_stack() const { return _max_stack; }
+ void set_max_stack(int size) { _max_stack = size; }
+
// Deallocation for RedefineClasses
void deallocate_contents(ClassLoaderData* loader_data);
bool is_klass() const { return false; }
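Moving the generic signature index into the tail of ConstMethod only works if ConstMethod::size() accounts for exactly the same optional trailing fields that the flag bits advertise; otherwise the writer and the end-relative accessors disagree about where last_u2_element() is. A simplified sketch of that sizing rule (the alignment and constants are assumptions, not the real layout):

#include <cstddef>
#include <cstdint>

enum { BYTES_PER_WORD_LIKE = sizeof(void*) };

static size_t align_up(size_t bytes, size_t unit) {
  return (bytes + unit - 1) / unit * unit;
}

// Mirrors the shape of ConstMethod::size(): each optional table adds its
// elements plus a u2 length slot, and a non-zero generic signature index
// adds one more u2 at the very end of the object.
static size_t packed_size_in_words(size_t header_bytes,
                                   size_t code_bytes,
                                   int checked_exceptions_length,
                                   uint16_t generic_signature_index) {
  size_t extra = code_bytes;
  if (checked_exceptions_length > 0) {
    extra += sizeof(uint16_t);                              // the length slot
    extra += checked_exceptions_length * sizeof(uint16_t);  // the elements
  }
  if (generic_signature_index != 0) {
    extra += sizeof(uint16_t);                              // the index slot
  }
  return (header_bytes + align_up(extra, BYTES_PER_WORD_LIKE)) / BYTES_PER_WORD_LIKE;
}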
--- a/hotspot/src/share/vm/oops/constantPool.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/constantPool.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -340,9 +340,7 @@
do_resolve = this_oop->tag_at(which).is_unresolved_klass();
if (do_resolve) {
ClassLoaderData* this_key = this_oop->pool_holder()->class_loader_data();
- if (!this_key->is_the_null_class_loader_data()) {
- this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM
- }
+ this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM
this_oop->klass_at_put(which, k());
}
}
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -51,20 +51,9 @@
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
--- a/hotspot/src/share/vm/oops/klass.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/klass.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -373,29 +373,22 @@
debug_only(verify();)
}
-void Klass::remove_from_sibling_list() {
- // remove receiver from sibling list
- InstanceKlass* super = superklass();
- assert(super != NULL || this == SystemDictionary::Object_klass(), "should have super");
- if (super == NULL) return; // special case: class Object
- if (super->subklass() == this) {
- // first subklass
- super->set_subklass(_next_sibling);
- } else {
- Klass* sib = super->subklass();
- while (sib->next_sibling() != this) {
- sib = sib->next_sibling();
- };
- sib->set_next_sibling(_next_sibling);
- }
-}
-
bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
assert(is_metadata(), "p is not meta-data");
assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
+
+#ifdef ASSERT
// The class is alive iff the class loader is alive.
oop loader = class_loader();
- return (loader == NULL) || is_alive->do_object_b(loader);
+ bool loader_alive = (loader == NULL) || is_alive->do_object_b(loader);
+#endif // ASSERT
+
+ // The class is alive if its mirror is alive (which should be marked if the
+ // loader is alive) unless it's an anonymous class.
+ bool mirror_alive = is_alive->do_object_b(java_mirror());
+ assert(!mirror_alive || loader_alive, "loader must be alive if the mirror is"
+ " but not the other way around with anonymous classes");
+ return mirror_alive;
}
void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
@@ -416,10 +409,10 @@
Klass* sub = current->subklass_oop();
while (sub != NULL && !sub->is_loader_alive(is_alive)) {
#ifndef PRODUCT
- if (TraceClassUnloading && WizardMode) {
- ResourceMark rm;
+ if (TraceClassUnloading && WizardMode) {
+ ResourceMark rm;
tty->print_cr("[Unlinking class (subclass) %s]", sub->external_name());
- }
+ }
#endif
sub = sub->next_sibling_oop();
}
@@ -431,16 +424,16 @@
// Find and set the first alive sibling
Klass* sibling = current->next_sibling_oop();
while (sibling != NULL && !sibling->is_loader_alive(is_alive)) {
- if (TraceClassUnloading && WizardMode) {
- ResourceMark rm;
+ if (TraceClassUnloading && WizardMode) {
+ ResourceMark rm;
tty->print_cr("[Unlinking class (sibling) %s]", sibling->external_name());
- }
+ }
sibling = sibling->next_sibling_oop();
- }
+ }
current->set_next_sibling(sibling);
if (sibling != NULL) {
stack.push(sibling);
-}
+ }
// Clean the implementors list and method data.
if (current->oop_is_instance()) {
@@ -554,7 +547,11 @@
InstanceKlass* ik = (InstanceKlass*) this;
if (ik->is_anonymous()) {
assert(EnableInvokeDynamic, "");
- intptr_t hash = ik->java_mirror()->identity_hash();
+ intptr_t hash = 0;
+ if (ik->java_mirror() != NULL) {
+ // The java_mirror might not have been created yet; use 0 as the hash then.
+ hash = ik->java_mirror()->identity_hash();
+ }
char hash_buf[40];
sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
size_t hash_len = strlen(hash_buf);
--- a/hotspot/src/share/vm/oops/klass.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/klass.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -267,7 +267,6 @@
Klass* subklass() const;
Klass* next_sibling() const;
void append_to_sibling_list(); // add newly created receiver to superklass' subklass list
- void remove_from_sibling_list(); // remove receiver from sibling list
void set_next_link(Klass* k) { _next_link = k; }
Klass* next_link() const { return _next_link; } // The next klass defined by the class loader.
@@ -581,8 +580,8 @@
// garbage collection support
virtual void oops_do(OopClosure* cl);
- // Checks if the class loader is alive.
- // Iff the class loader is alive the Klass is considered alive.
+ // Iff the class loader (or mirror for anonymous classes) is alive the
+ // Klass is considered alive.
// The is_alive closure passed in depends on the Garbage Collector used.
bool is_loader_alive(BoolObjectClosure* is_alive);
--- a/hotspot/src/share/vm/oops/markOop.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/markOop.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,19 +24,7 @@
#include "precompiled.hpp"
#include "oops/markOop.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
-
+#include "runtime/thread.inline.hpp"
void markOopDesc::print_on(outputStream* st) const {
if (is_locked()) {
--- a/hotspot/src/share/vm/oops/method.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/method.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -64,6 +64,7 @@
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
+ u2 generic_signature_index,
ConstMethod::MethodType method_type,
TRAPS) {
assert(!access_flags.is_native() || byte_code_size == 0,
@@ -74,6 +75,7 @@
localvariable_table_length,
exception_table_length,
checked_exceptions_length,
+ generic_signature_index,
method_type,
CHECK_NULL);
@@ -1034,7 +1036,7 @@
methodHandle m;
{
Method* m_oop = Method::allocate(loader_data, 0, accessFlags_from(flags_bits),
- 0, 0, 0, 0, ConstMethod::NORMAL, CHECK_(empty));
+ 0, 0, 0, 0, 0, ConstMethod::NORMAL, CHECK_(empty));
m = methodHandle(THREAD, m_oop);
}
m->set_constants(cp());
@@ -1082,6 +1084,7 @@
assert(!m->is_native(), "cannot rewrite native methods");
// Allocate new Method*
AccessFlags flags = m->access_flags();
+ u2 generic_signature_index = m->generic_signature_index();
int checked_exceptions_len = m->checked_exceptions_length();
int localvariable_len = m->localvariable_table_length();
int exception_table_len = m->exception_table_length();
@@ -1094,6 +1097,7 @@
localvariable_len,
exception_table_len,
checked_exceptions_len,
+ generic_signature_index,
m->method_type(),
CHECK_(methodHandle()));
methodHandle newm (THREAD, newm_oop);
@@ -1814,6 +1818,23 @@
loader_data->jmethod_ids()->clear_all_methods();
}
+
+// Check that this pointer is valid by checking that the vtbl pointer matches
+bool Method::is_valid_method() const {
+ if (this == NULL) {
+ return false;
+ } else if (!is_metaspace_object()) {
+ return false;
+ } else {
+ Method m;
+ // This assumes that the vtbl pointer is the first word of a C++ object.
+ // This assumption is also made by patch_klass_vtables() in metaspaceShared.cpp.
+ void* vtbl2 = dereference_vptr((void*)&m);
+ void* this_vtbl = dereference_vptr((void*)this);
+ return vtbl2 == this_vtbl;
+ }
+}
+
#ifndef PRODUCT
void Method::print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) {
out->print_cr("jni_method_id count = %d", loader_data->jmethod_ids()->count_methods());
@@ -1935,7 +1956,7 @@
guarantee(constMethod()->is_metadata(), "should be metadata");
MethodData* md = method_data();
guarantee(md == NULL ||
- md->is_metadata(), "should be in permspace");
+ md->is_metadata(), "should be metadata");
guarantee(md == NULL ||
md->is_methodData(), "should be method data");
}
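Method::is_valid_method() above leans on a C++ implementation detail: on the supported compilers the vtable pointer is the first word of a polymorphic object, so a candidate pointer can be sanity-checked by comparing its first word against the vptr of a freshly constructed Method. A standalone sketch of that trick with a hypothetical polymorphic class; reading the vptr this way is outside the C++ standard and only defensible as a debugging heuristic:

#include <cstddef>

// Assumes the common C++ ABI layout where the vtable pointer is the first
// word of a polymorphic object (the same assumption the HotSpot code states).
static void* first_word(const void* obj) {
  return *(void* const*)obj;
}

class Widget {
 public:
  Widget() {}
  virtual ~Widget() {}
  virtual void poke() {}
};

// "Does this pointer plausibly point at a Widget?" Compare its first word
// against the vptr of a known-good, freshly constructed Widget.
static bool looks_like_widget(const void* candidate) {
  if (candidate == NULL) {
    return false;
  }
  Widget probe;
  return first_word(candidate) == first_word(&probe);
}

As with Method::is_valid_method(), a matching first word is necessary but not sufficient, which is why the real check also requires the pointer to be a metaspace object first.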
--- a/hotspot/src/share/vm/oops/method.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/method.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -73,12 +73,10 @@
// |------------------------------------------------------|
// | result_index (C++ interpreter only) |
// |------------------------------------------------------|
-// | method_size | max_stack |
-// | max_locals | size_of_parameters |
+// | method_size | max_locals |
+// | size_of_parameters | intrinsic_id| flags |
// |------------------------------------------------------|
-// |intrinsic_id| flags | throwout_count |
-// |------------------------------------------------------|
-// | num_breakpoints | (unused) |
+// | throwout_count | num_breakpoints |
// |------------------------------------------------------|
// | invocation_counter |
// | backedge_counter |
@@ -118,7 +116,6 @@
int _result_index; // C++ interpreter needs for converting results to/from stack
#endif
u2 _method_size; // size of this object
- u2 _max_stack; // Maximum number of entries on the expression stack
u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
@@ -166,10 +163,12 @@
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
+ u2 generic_signature_index,
ConstMethod::MethodType method_type,
TRAPS);
- Method() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
+ // CDS and vtbl checking can create an empty Method to get vtbl pointer.
+ Method(){}
// The Method vtable is restored by this call when the Method is in the
// shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
@@ -288,9 +287,9 @@
// max stack
// return original max stack size for method verification
- int verifier_max_stack() const { return _max_stack; }
- int max_stack() const { return _max_stack + extra_stack_entries(); }
- void set_max_stack(int size) { _max_stack = size; }
+ int verifier_max_stack() const { return constMethod()->max_stack(); }
+ int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); }
+ void set_max_stack(int size) { constMethod()->set_max_stack(size); }
// max locals
int max_locals() const { return _max_locals; }
@@ -606,7 +605,6 @@
static ByteSize from_interpreted_offset() { return byte_offset_of(Method, _from_interpreted_entry ); }
static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry ); }
static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }
- static ByteSize max_stack_offset() { return byte_offset_of(Method, _max_stack ); }
// for code generation
static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); }
@@ -812,6 +810,9 @@
const char* internal_name() const { return "{method}"; }
+ // Check for valid method pointer
+ bool is_valid_method() const;
+
// Verify
void verify() { verify_on(tty); }
void verify_on(outputStream* st);
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -88,11 +88,6 @@
}
if (!supers_exist) {
// Oops. Not allocated yet. Back out, allocate it, and retry.
-#ifndef PRODUCT
- if (WizardMode) {
- tty->print_cr("Must retry array klass creation for depth %d",n);
- }
-#endif
KlassHandle ek;
{
MutexUnlocker mu(MultiArray_lock);
--- a/hotspot/src/share/vm/oops/oop.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/oop.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,19 +27,8 @@
#include "classfile/javaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
bool always_do_update_barrier = false;
--- a/hotspot/src/share/vm/oops/oopsHierarchy.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/oops/oopsHierarchy.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,20 +26,8 @@
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oopsHierarchy.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/globalDefinitions.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef CHECK_UNHANDLED_OOPS
--- a/hotspot/src/share/vm/opto/block.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/block.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -292,7 +292,7 @@
void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
// Cleanup if any code lands between a Call and his Catch
- void call_catch_cleanup(Block_Array &bbs);
+ void call_catch_cleanup(Block_Array &bbs, Compile *C);
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
--- a/hotspot/src/share/vm/opto/c2_globals.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -115,6 +115,12 @@
notproduct(bool, VerifyOpto, false, \
"Apply more time consuming verification during compilation") \
\
+ notproduct(bool, VerifyIdealNodeCount, false, \
+ "Verify that tracked dead ideal node count is accurate") \
+ \
+ notproduct(bool, PrintIdealNodeCount, false, \
+ "Print liveness counts of ideal nodes") \
+ \
notproduct(bool, VerifyOptoOopOffsets, false, \
"Check types of base addresses in field references") \
\
--- a/hotspot/src/share/vm/opto/callGenerator.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -139,7 +139,7 @@
if (!is_static) {
// Make an explicit receiver null_check as part of this call.
// Since we share a map with the caller, his JVMS gets adjusted.
- kit.null_check_receiver(method());
+ kit.null_check_receiver_before_call(method());
if (kit.stopped()) {
// And dump it back to the caller, decorated with any exceptions:
return kit.transfer_exceptions_into_jvms();
@@ -207,7 +207,7 @@
>= (uint)ImplicitNullCheckThreshold))) {
// Make an explicit receiver null_check as part of this call.
// Since we share a map with the caller, his JVMS gets adjusted.
- receiver = kit.null_check_receiver(method());
+ receiver = kit.null_check_receiver_before_call(method());
if (kit.stopped()) {
// And dump it back to the caller, decorated with any exceptions:
return kit.transfer_exceptions_into_jvms();
@@ -491,7 +491,7 @@
jvms->bci(), log->identify(_predicted_receiver));
}
- receiver = kit.null_check_receiver(method());
+ receiver = kit.null_check_receiver_before_call(method());
if (kit.stopped()) {
return kit.transfer_exceptions_into_jvms();
}
@@ -597,7 +597,7 @@
switch (iid) {
case vmIntrinsics::_invokeBasic:
{
- // get MethodHandle receiver
+ // Get MethodHandle receiver:
Node* receiver = kit.argument(0);
if (receiver->Opcode() == Op_ConP) {
const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
@@ -618,7 +618,7 @@
case vmIntrinsics::_linkToSpecial:
case vmIntrinsics::_linkToInterface:
{
- // pop MemberName argument
+ // Get MemberName argument:
Node* member_name = kit.argument(callee->arg_size() - 1);
if (member_name->Opcode() == Op_ConP) {
const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
--- a/hotspot/src/share/vm/opto/callnode.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/callnode.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -344,17 +344,26 @@
OopMap *oop_map() const { return _oop_map; }
void set_oop_map(OopMap *om) { _oop_map = om; }
+ private:
+ void verify_input(JVMState* jvms, uint idx) const {
+ assert(verify_jvms(jvms), "jvms must match");
+ Node* n = in(idx);
+ assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
+ in(idx + 1)->is_top(), "2nd half of long/double");
+ }
+
+ public:
// Functionality from old debug nodes which has changed
Node *local(JVMState* jvms, uint idx) const {
- assert(verify_jvms(jvms), "jvms must match");
+ verify_input(jvms, jvms->locoff() + idx);
return in(jvms->locoff() + idx);
}
Node *stack(JVMState* jvms, uint idx) const {
- assert(verify_jvms(jvms), "jvms must match");
+ verify_input(jvms, jvms->stkoff() + idx);
return in(jvms->stkoff() + idx);
}
Node *argument(JVMState* jvms, uint idx) const {
- assert(verify_jvms(jvms), "jvms must match");
+ verify_input(jvms, jvms->argoff() + idx);
return in(jvms->argoff() + idx);
}
Node *monitor_box(JVMState* jvms, uint idx) const {
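The new verify_input() helper asserts the JVM's two-slot convention: a long or double occupies two state slots and the second slot must hold the compiler's top placeholder. A minimal standalone sketch of that invariant follows, with simplified stand-in types (ToyKind, ToySlot, check_pair are illustrative names, not HotSpot API):

// Illustrative sketch only: models the "second half of a long/double is top"
// invariant that the verify_input() assertion checks on debug inputs.
#include <cassert>
#include <cstdio>
#include <vector>

enum class ToyKind { Int, Object, Long, Double, Top };

struct ToySlot {
  ToyKind kind;
};

// A two-word value (long/double) must be followed by a Top placeholder slot.
static void check_pair(const std::vector<ToySlot>& state, size_t idx) {
  const ToySlot& s = state[idx];
  bool two_word = (s.kind == ToyKind::Long || s.kind == ToyKind::Double);
  assert(!two_word || state[idx + 1].kind == ToyKind::Top);
}

int main() {
  // Stack image for: iconst_1; lconst_0  ->  [Int][Long][Top]
  std::vector<ToySlot> state = {{ToyKind::Int}, {ToyKind::Long}, {ToyKind::Top}};
  for (size_t i = 0; i + 1 < state.size(); ++i) check_pair(state, i);
  std::printf("two-slot invariant holds\n");
  return 0;
}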
--- a/hotspot/src/share/vm/opto/chaitin.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/chaitin.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1495,7 +1495,7 @@
cisc->ins_req(1,src); // Requires a memory edge
}
b->_nodes.map(j,cisc); // Insert into basic block
- n->subsume_by(cisc); // Correct graph
+ n->subsume_by(cisc, C); // Correct graph
//
++_used_cisc_instructions;
#ifndef PRODUCT
--- a/hotspot/src/share/vm/opto/compile.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/compile.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,11 +23,13 @@
*/
#include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
+#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
@@ -316,7 +318,12 @@
}
-
+static inline bool not_a_node(const Node* n) {
+ if (n == NULL) return true;
+ if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
+ if (*(address*)n == badAddress) return true; // killed by Node::destruct
+ return false;
+}
// Identify all nodes that are reachable from below, useful.
// Use breadth-first pass that records state in a Unique_Node_List,
@@ -337,12 +344,27 @@
uint max = n->len();
for( uint i = 0; i < max; ++i ) {
Node *m = n->in(i);
- if( m == NULL ) continue;
+ if (not_a_node(m)) continue;
useful.push(m);
}
}
}
+// Update dead_node_list with any missing dead nodes using the useful
+// list. All nodes that are not in the useful list are considered dead.
+void Compile::update_dead_node_list(Unique_Node_List &useful) {
+ uint max_idx = unique();
+ VectorSet& useful_node_set = useful.member_set();
+
+ for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
+ // If node with index node_idx is not in useful set,
+ // mark it as dead in dead node list.
+ if (! useful_node_set.test(node_idx) ) {
+ record_dead_node(node_idx);
+ }
+ }
+}
+
// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
uint next = 0;
@@ -582,6 +604,8 @@
_inner_loops(0),
_scratch_const_size(-1),
_in_scratch_emit_size(false),
+ _dead_node_list(comp_arena()),
+ _dead_node_count(0),
#ifndef PRODUCT
_trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
_printer(IdealGraphPrinter::printer()),
@@ -873,6 +897,8 @@
_trace_opto_output(TraceOptoOutput),
_printer(NULL),
#endif
+ _dead_node_list(comp_arena()),
+ _dead_node_count(0),
_congraph(NULL) {
C = this;
@@ -1069,6 +1095,72 @@
assert(_top == NULL || top()->is_top(), "");
}
+#ifdef ASSERT
+uint Compile::count_live_nodes_by_graph_walk() {
+ Unique_Node_List useful(comp_arena());
+ // Get useful node list by walking the graph.
+ identify_useful_nodes(useful);
+ return useful.size();
+}
+
+void Compile::print_missing_nodes() {
+
+ // Return if CompileLog is NULL and PrintIdealNodeCount is false.
+ if ((_log == NULL) && (! PrintIdealNodeCount)) {
+ return;
+ }
+
+ // This is an expensive function. It is executed only when the user
+ // specifies the VerifyIdealNodeCount option and thus accepts the
+ // additional work of identifying reachable nodes by walking the flow
+ // graph and finding the missing ones using _dead_node_list.
+
+ Unique_Node_List useful(comp_arena());
+ // Get useful node list by walking the graph.
+ identify_useful_nodes(useful);
+
+ uint l_nodes = C->live_nodes();
+ uint l_nodes_by_walk = useful.size();
+
+ if (l_nodes != l_nodes_by_walk) {
+ if (_log != NULL) {
+ _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
+ _log->stamp();
+ _log->end_head();
+ }
+ VectorSet& useful_member_set = useful.member_set();
+ int last_idx = l_nodes_by_walk;
+ for (int i = 0; i < last_idx; i++) {
+ if (useful_member_set.test(i)) {
+ if (_dead_node_list.test(i)) {
+ if (_log != NULL) {
+ _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
+ }
+ if (PrintIdealNodeCount) {
+ // Print the log message to tty
+ tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i);
+ useful.at(i)->dump();
+ }
+ }
+ }
+ else if (! _dead_node_list.test(i)) {
+ if (_log != NULL) {
+ _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
+ }
+ if (PrintIdealNodeCount) {
+ // Print the log message to tty
+ tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
+ }
+ }
+ }
+ if (_log != NULL) {
+ _log->tail("mismatched_nodes");
+ }
+ }
+}
+#endif
+
#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
if (tn != NULL) {
@@ -2087,7 +2179,7 @@
// Eliminate trivially redundant StoreCMs and accumulate their
// precedence edges.
-static void eliminate_redundant_card_marks(Node* n) {
+void Compile::eliminate_redundant_card_marks(Node* n) {
assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
if (n->in(MemNode::Address)->outcnt() > 1) {
// There are multiple users of the same address so it might be
@@ -2122,7 +2214,7 @@
// Eliminate the previous StoreCM
prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
assert(mem->outcnt() == 0, "should be dead");
- mem->disconnect_inputs(NULL);
+ mem->disconnect_inputs(NULL, this);
} else {
prev = mem;
}
@@ -2133,7 +2225,7 @@
//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
-static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
+void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
if ( n->outcnt() == 0 ) return; // dead node
uint nop = n->Opcode();
@@ -2163,8 +2255,7 @@
#ifdef ASSERT
if( n->is_Mem() ) {
- Compile* C = Compile::current();
- int alias_idx = C->get_alias_index(n->as_Mem()->adr_type());
+ int alias_idx = get_alias_index(n->as_Mem()->adr_type());
assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
// oop will be recorded in oop map if load crosses safepoint
n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
@@ -2213,7 +2304,7 @@
break;
case Op_Opaque1: // Remove Opaque Nodes before matching
case Op_Opaque2: // Remove Opaque Nodes before matching
- n->subsume_by(n->in(1));
+ n->subsume_by(n->in(1), this);
break;
case Op_CallStaticJava:
case Op_CallJava:
@@ -2337,8 +2428,7 @@
int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
// Look for existing ConN node of the same exact type.
- Compile* C = Compile::current();
- Node* r = C->root();
+ Node* r = root();
uint cnt = r->outcnt();
for (uint i = 0; i < cnt; i++) {
Node* m = r->raw_out(i);
@@ -2352,14 +2442,14 @@
// Decode a narrow oop to match address
// [R12 + narrow_oop_reg<<3 + offset]
if (t->isa_oopptr()) {
- nn = new (C) DecodeNNode(nn, t);
+ nn = new (this) DecodeNNode(nn, t);
} else {
- nn = new (C) DecodeNKlassNode(nn, t);
+ nn = new (this) DecodeNKlassNode(nn, t);
}
n->set_req(AddPNode::Base, nn);
n->set_req(AddPNode::Address, nn);
if (addp->outcnt() == 0) {
- addp->disconnect_inputs(NULL);
+ addp->disconnect_inputs(NULL, this);
}
}
}
@@ -2371,7 +2461,6 @@
#ifdef _LP64
case Op_CastPP:
if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
- Compile* C = Compile::current();
Node* in1 = n->in(1);
const Type* t = n->bottom_type();
Node* new_in1 = in1->clone();
@@ -2400,9 +2489,9 @@
new_in1->set_req(0, n->in(0));
}
- n->subsume_by(new_in1);
+ n->subsume_by(new_in1, this);
if (in1->outcnt() == 0) {
- in1->disconnect_inputs(NULL);
+ in1->disconnect_inputs(NULL, this);
}
}
break;
@@ -2419,7 +2508,6 @@
}
assert(in1->is_DecodeNarrowPtr(), "sanity");
- Compile* C = Compile::current();
Node* new_in2 = NULL;
if (in2->is_DecodeNarrowPtr()) {
assert(in2->Opcode() == in1->Opcode(), "must be same node type");
@@ -2432,7 +2520,7 @@
// oops implicit null check is not generated.
// This will allow to generate normal oop implicit null check.
if (Matcher::gen_narrow_oop_implicit_null_checks())
- new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
+ new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
//
// This transformation together with CastPP transformation above
// will generated code for implicit NULL checks for compressed oops.
@@ -2471,19 +2559,19 @@
// NullCheck base_reg
//
} else if (t->isa_oopptr()) {
- new_in2 = ConNode::make(C, t->make_narrowoop());
+ new_in2 = ConNode::make(this, t->make_narrowoop());
} else if (t->isa_klassptr()) {
- new_in2 = ConNode::make(C, t->make_narrowklass());
+ new_in2 = ConNode::make(this, t->make_narrowklass());
}
}
if (new_in2 != NULL) {
- Node* cmpN = new (C) CmpNNode(in1->in(1), new_in2);
- n->subsume_by( cmpN );
+ Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2);
+ n->subsume_by(cmpN, this);
if (in1->outcnt() == 0) {
- in1->disconnect_inputs(NULL);
+ in1->disconnect_inputs(NULL, this);
}
if (in2->outcnt() == 0) {
- in2->disconnect_inputs(NULL);
+ in2->disconnect_inputs(NULL, this);
}
}
}
@@ -2501,21 +2589,20 @@
case Op_EncodePKlass: {
Node* in1 = n->in(1);
if (in1->is_DecodeNarrowPtr()) {
- n->subsume_by(in1->in(1));
+ n->subsume_by(in1->in(1), this);
} else if (in1->Opcode() == Op_ConP) {
- Compile* C = Compile::current();
const Type* t = in1->bottom_type();
if (t == TypePtr::NULL_PTR) {
assert(t->isa_oopptr(), "null klass?");
- n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR));
+ n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
} else if (t->isa_oopptr()) {
- n->subsume_by(ConNode::make(C, t->make_narrowoop()));
+ n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
} else if (t->isa_klassptr()) {
- n->subsume_by(ConNode::make(C, t->make_narrowklass()));
+ n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
}
}
if (in1->outcnt() == 0) {
- in1->disconnect_inputs(NULL);
+ in1->disconnect_inputs(NULL, this);
}
break;
}
@@ -2538,7 +2625,7 @@
}
}
assert(proj != NULL, "must be found");
- p->subsume_by(proj);
+ p->subsume_by(proj, this);
}
}
break;
@@ -2558,7 +2645,7 @@
unique_in = NULL;
}
if (unique_in != NULL) {
- n->subsume_by(unique_in);
+ n->subsume_by(unique_in, this);
}
}
break;
@@ -2571,16 +2658,15 @@
Node* d = n->find_similar(Op_DivI);
if (d) {
// Replace them with a fused divmod if supported
- Compile* C = Compile::current();
if (Matcher::has_match_rule(Op_DivModI)) {
- DivModINode* divmod = DivModINode::make(C, n);
- d->subsume_by(divmod->div_proj());
- n->subsume_by(divmod->mod_proj());
+ DivModINode* divmod = DivModINode::make(this, n);
+ d->subsume_by(divmod->div_proj(), this);
+ n->subsume_by(divmod->mod_proj(), this);
} else {
// replace a%b with a-((a/b)*b)
- Node* mult = new (C) MulINode(d, d->in(2));
- Node* sub = new (C) SubINode(d->in(1), mult);
- n->subsume_by( sub );
+ Node* mult = new (this) MulINode(d, d->in(2));
+ Node* sub = new (this) SubINode(d->in(1), mult);
+ n->subsume_by(sub, this);
}
}
}
@@ -2592,16 +2678,15 @@
Node* d = n->find_similar(Op_DivL);
if (d) {
// Replace them with a fused divmod if supported
- Compile* C = Compile::current();
if (Matcher::has_match_rule(Op_DivModL)) {
- DivModLNode* divmod = DivModLNode::make(C, n);
- d->subsume_by(divmod->div_proj());
- n->subsume_by(divmod->mod_proj());
+ DivModLNode* divmod = DivModLNode::make(this, n);
+ d->subsume_by(divmod->div_proj(), this);
+ n->subsume_by(divmod->mod_proj(), this);
} else {
// replace a%b with a-((a/b)*b)
- Node* mult = new (C) MulLNode(d, d->in(2));
- Node* sub = new (C) SubLNode(d->in(1), mult);
- n->subsume_by( sub );
+ Node* mult = new (this) MulLNode(d, d->in(2));
+ Node* sub = new (this) SubLNode(d->in(1), mult);
+ n->subsume_by(sub, this);
}
}
}
@@ -2620,8 +2705,8 @@
if (n->req()-1 > 2) {
// Replace many operand PackNodes with a binary tree for matching
PackNode* p = (PackNode*) n;
- Node* btp = p->binary_tree_pack(Compile::current(), 1, n->req());
- n->subsume_by(btp);
+ Node* btp = p->binary_tree_pack(this, 1, n->req());
+ n->subsume_by(btp, this);
}
break;
case Op_Loop:
@@ -2645,18 +2730,16 @@
if (t != NULL && t->is_con()) {
juint shift = t->get_con();
if (shift > mask) { // Unsigned cmp
- Compile* C = Compile::current();
- n->set_req(2, ConNode::make(C, TypeInt::make(shift & mask)));
+ n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
}
} else {
if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
- Compile* C = Compile::current();
- Node* shift = new (C) AndINode(in2, ConNode::make(C, TypeInt::make(mask)));
+ Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
n->set_req(2, shift);
}
}
if (in2->outcnt() == 0) { // Remove dead node
- in2->disconnect_inputs(NULL);
+ in2->disconnect_inputs(NULL, this);
}
}
break;
@@ -2674,7 +2757,7 @@
//------------------------------final_graph_reshaping_walk---------------------
// Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
// requires that the walk visits a node's inputs before visiting the node.
-static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
+void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
ResourceArea *area = Thread::current()->resource_area();
Unique_Node_List sfpt(area);
@@ -2741,7 +2824,7 @@
n->set_req(j, in->in(1));
}
if (in->outcnt() == 0) {
- in->disconnect_inputs(NULL);
+ in->disconnect_inputs(NULL, this);
}
}
}
@@ -3014,7 +3097,8 @@
}
Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
- : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false)
+ : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
+ _phase_name(name), _dolog(dolog)
{
if (dolog) {
C = Compile::current();
@@ -3024,15 +3108,34 @@
_log = NULL;
}
if (_log != NULL) {
- _log->begin_head("phase name='%s' nodes='%d'", name, C->unique());
+ _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
_log->stamp();
_log->end_head();
}
}
Compile::TracePhase::~TracePhase() {
+
+ C = Compile::current();
+ if (_dolog) {
+ _log = C->log();
+ } else {
+ _log = NULL;
+ }
+
+#ifdef ASSERT
+ if (PrintIdealNodeCount) {
+ tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
+ _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
+ }
+
+ if (VerifyIdealNodeCount) {
+ Compile::current()->print_missing_nodes();
+ }
+#endif
+
if (_log != NULL) {
- _log->done("phase nodes='%d'", C->unique());
+ _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
}
}
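The compile.cpp changes above revolve around one idea: Compile keeps an O(1) count of live ideal nodes (nodes created minus nodes recorded dead) and, in verification builds, cross-checks it against an explicit reachability walk. A minimal sketch of that bookkeeping follows under simplified assumptions (ToyGraph, record_dead, walk_live are illustrative names, not HotSpot API):

// Illustrative sketch only: ToyGraph stands in for Compile. It shows the
// pattern of tracking dead nodes in a bit vector plus a counter, so
// live_nodes() is O(1), and verifying the count with a graph walk.
#include <cassert>
#include <cstdio>
#include <vector>

struct ToyGraph {
  struct Node { std::vector<int> in; };  // inputs by node index
  std::vector<Node> nodes;               // size() plays the role of _unique
  std::vector<bool> dead_list;           // like _dead_node_list
  unsigned dead_count = 0;               // like _dead_node_count

  int add(std::vector<int> in = {}) {
    nodes.push_back({std::move(in)});
    dead_list.push_back(false);
    return (int)nodes.size() - 1;
  }
  void record_dead(int idx) {
    if (dead_list[idx]) return;          // count each node at most once
    dead_list[idx] = true;
    dead_count++;
  }
  unsigned unique() const { return (unsigned)nodes.size(); }
  unsigned live_nodes() const { return unique() - dead_count; }  // O(1)

  // Expensive check: count nodes reachable from root, analogous to
  // count_live_nodes_by_graph_walk().
  unsigned walk_live(int root) const {
    std::vector<bool> seen(nodes.size(), false);
    std::vector<int> work{root};
    unsigned count = 0;
    while (!work.empty()) {
      int n = work.back(); work.pop_back();
      if (seen[n]) continue;
      seen[n] = true;
      count++;
      for (int m : nodes[n].in) work.push_back(m);
    }
    return count;
  }
};

int main() {
  ToyGraph g;
  int a = g.add();
  int b = g.add({a});
  int root = g.add({b});
  int orphan = g.add();                  // unreachable from root
  g.record_dead(orphan);
  assert(g.live_nodes() == g.walk_live(root));
  std::printf("tracked live=%u, walked live=%u\n", g.live_nodes(), g.walk_live(root));
  return 0;
}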
--- a/hotspot/src/share/vm/opto/compile.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/compile.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -75,6 +75,8 @@
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
+class Node_Stack;
+struct Final_Reshape_Counts;
//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.
@@ -98,6 +100,8 @@
private:
Compile* C;
CompileLog* _log;
+ const char* _phase_name;
+ bool _dolog;
public:
TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
~TracePhase();
@@ -313,6 +317,9 @@
// Node management
uint _unique; // Counter for unique Node indices
+ VectorSet _dead_node_list; // Set of dead nodes
+ uint _dead_node_count; // Number of dead nodes; VectorSet::Size() is O(N).
+ // So use this to keep count and make the call O(1).
debug_only(static int _debug_idx;) // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
Arena _node_arena; // Arena for new-space Nodes
Arena _old_arena; // Arena for old-space Nodes, lifetime during xform
@@ -534,7 +541,7 @@
ciEnv* env() const { return _env; }
CompileLog* log() const { return _log; }
bool failing() const { return _env->failing() || _failure_reason != NULL; }
- const char* failure_reason() { return _failure_reason; }
+ const char* failure_reason() { return _failure_reason; }
bool failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }
void record_failure(const char* reason);
@@ -549,7 +556,7 @@
record_method_not_compilable(reason, true);
}
bool check_node_count(uint margin, const char* reason) {
- if (unique() + margin > (uint)MaxNodeLimit) {
+ if (live_nodes() + margin > (uint)MaxNodeLimit) {
record_method_not_compilable(reason);
return true;
} else {
@@ -558,25 +565,41 @@
}
// Node management
- uint unique() const { return _unique; }
- uint next_unique() { return _unique++; }
- void set_unique(uint i) { _unique = i; }
- static int debug_idx() { return debug_only(_debug_idx)+0; }
- static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
- Arena* node_arena() { return &_node_arena; }
- Arena* old_arena() { return &_old_arena; }
- RootNode* root() const { return _root; }
- void set_root(RootNode* r) { _root = r; }
- StartNode* start() const; // (Derived from root.)
+ uint unique() const { return _unique; }
+ uint next_unique() { return _unique++; }
+ void set_unique(uint i) { _unique = i; }
+ static int debug_idx() { return debug_only(_debug_idx)+0; }
+ static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
+ Arena* node_arena() { return &_node_arena; }
+ Arena* old_arena() { return &_old_arena; }
+ RootNode* root() const { return _root; }
+ void set_root(RootNode* r) { _root = r; }
+ StartNode* start() const; // (Derived from root.)
void init_start(StartNode* s);
- Node* immutable_memory();
+ Node* immutable_memory();
- Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
- Node* recent_alloc_obj() const { return _recent_alloc_obj; }
- void set_recent_alloc(Node* ctl, Node* obj) {
+ Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
+ Node* recent_alloc_obj() const { return _recent_alloc_obj; }
+ void set_recent_alloc(Node* ctl, Node* obj) {
_recent_alloc_ctl = ctl;
_recent_alloc_obj = obj;
- }
+ }
+ void record_dead_node(uint idx) { if (_dead_node_list.test_set(idx)) return;
+ _dead_node_count++;
+ }
+ uint dead_node_count() { return _dead_node_count; }
+ void reset_dead_node_list() { _dead_node_list.Reset();
+ _dead_node_count = 0;
+ }
+ uint live_nodes() {
+ int val = _unique - _dead_node_count;
+ assert (val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique));
+ return (uint) val;
+ }
+#ifdef ASSERT
+ uint count_live_nodes_by_graph_walk();
+ void print_missing_nodes();
+#endif
// Constant table
ConstantTable& constant_table() { return _constant_table; }
@@ -678,6 +701,7 @@
void identify_useful_nodes(Unique_Node_List &useful);
+ void update_dead_node_list(Unique_Node_List &useful);
void remove_useless_nodes (Unique_Node_List &useful);
WarmCallInfo* warm_calls() const { return _warm_calls; }
@@ -892,6 +916,11 @@
static juint _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
+ // Function calls made by the public function final_graph_reshaping.
+ // No need to be made public as they are not called elsewhere.
+ void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
+ void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
+ void eliminate_redundant_card_marks(Node* n);
public:
--- a/hotspot/src/share/vm/opto/doCall.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/doCall.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -350,7 +350,7 @@
// Set frequently used booleans
const bool is_virtual = bc() == Bytecodes::_invokevirtual;
const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
- const bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
+ const bool has_receiver = Bytecodes::has_receiver(bc());
// Find target being called
bool will_link;
@@ -380,6 +380,8 @@
// Note: In the absence of miranda methods, an abstract class K can perform
// an invokevirtual directly on an interface method I.m if K implements I.
+ // orig_callee is the resolved callee whose signature includes the
+ // appendix argument.
const int nargs = orig_callee->arg_size();
// Push appendix argument (MethodType, CallSite, etc.), if one.
@@ -572,7 +574,7 @@
}
// If there is going to be a trap, put it at the next bytecode:
set_bci(iter().next_bci());
- do_null_assert(peek(), T_OBJECT);
+ null_assert(peek());
set_bci(iter().cur_bci()); // put it back
}
}
--- a/hotspot/src/share/vm/opto/escape.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/escape.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -2320,7 +2320,7 @@
}
}
}
- if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
+ if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) {
if (C->do_escape_analysis() == true && !C->failing()) {
// Retry compilation without escape analysis.
// If this is the first failure, the sentinel string will "stick"
--- a/hotspot/src/share/vm/opto/gcm.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/gcm.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1359,7 +1359,7 @@
// If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch.
for( i=0; i < _num_blocks; i++ )
- _blocks[i]->call_catch_cleanup(_bbs);
+ _blocks[i]->call_catch_cleanup(_bbs, C);
#ifndef PRODUCT
if (trace_opto_pipelining()) {
--- a/hotspot/src/share/vm/opto/graphKit.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -93,6 +93,16 @@
return jvms;
}
+//--------------------------------sync_jvms_for_reexecute---------------------
+// Make sure our current jvms agrees with our parse state. This version
+// uses the reexecute_sp for reexecuting bytecodes.
+JVMState* GraphKit::sync_jvms_for_reexecute() {
+ JVMState* jvms = this->jvms();
+ jvms->set_bci(bci()); // Record the new bci in the JVMState
+ jvms->set_sp(reexecute_sp()); // Record the new sp in the JVMState
+ return jvms;
+}
+
#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
Parse* parse = is_Parse();
@@ -143,7 +153,7 @@
void GraphKit::stop_and_kill_map() {
SafePointNode* dead_map = stop();
if (dead_map != NULL) {
- dead_map->disconnect_inputs(NULL); // Mark the map as killed.
+ dead_map->disconnect_inputs(NULL, C); // Mark the map as killed.
assert(dead_map->is_killed(), "must be so marked");
}
}
@@ -826,7 +836,16 @@
// Walk the inline list to fill in the correct set of JVMState's
// Also fill in the associated edges for each JVMState.
- JVMState* youngest_jvms = sync_jvms();
+ // If the bytecode needs to be reexecuted we need to put
+ // the arguments back on the stack.
+ const bool should_reexecute = jvms()->should_reexecute();
+ JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();
+
+ // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
+ // undefined if the bci is different. This is normal for Parse but it
+ // should not happen for LibraryCallKit because only one bci is processed.
+ assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
+ "in LibraryCallKit the reexecute bit should not change");
// If we are guaranteed to throw, we can prune everything but the
// input to the current bytecode.
@@ -860,7 +879,7 @@
}
// Presize the call:
- debug_only(uint non_debug_edges = call->req());
+ DEBUG_ONLY(uint non_debug_edges = call->req());
call->add_req_batch(top(), youngest_jvms->debug_depth());
assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
@@ -965,7 +984,7 @@
assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}
-bool GraphKit::compute_stack_effects(int& inputs, int& depth, bool for_parse) {
+bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
Bytecodes::Code code = java_bc();
if (code == Bytecodes::_wide) {
code = method()->java_code_at_bci(bci() + 1);
@@ -1005,14 +1024,11 @@
case Bytecodes::_getfield:
case Bytecodes::_putfield:
{
+ bool ignored_will_link;
+ ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
+ int size = field->type()->size();
bool is_get = (depth >= 0), is_static = (depth & 1);
- ciBytecodeStream iter(method());
- iter.reset_to_bci(bci());
- iter.next();
- bool ignored_will_link;
- ciField* field = iter.get_field(ignored_will_link);
- int size = field->type()->size();
- inputs = (is_static ? 0 : 1);
+ inputs = (is_static ? 0 : 1);
if (is_get) {
depth = size - inputs;
} else {
@@ -1028,26 +1044,11 @@
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
{
- ciBytecodeStream iter(method());
- iter.reset_to_bci(bci());
- iter.next();
bool ignored_will_link;
ciSignature* declared_signature = NULL;
- ciMethod* callee = iter.get_method(ignored_will_link, &declared_signature);
+ ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
assert(declared_signature != NULL, "cannot be null");
- // (Do not use ciMethod::arg_size(), because
- // it might be an unloaded method, which doesn't
- // know whether it is static or not.)
- if (for_parse) {
- // Case 1: When called from parse we are *before* the invoke (in the
- // caller) and need to to adjust the inputs by an appendix
- // argument that will be pushed implicitly.
- inputs = callee->invoke_arg_size(code) - (iter.has_appendix() ? 1 : 0);
- } else {
- // Case 2: Here we are *after* the invoke (in the callee) and need to
- // remove any appendix arguments that were popped.
- inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0);
- }
+ inputs = declared_signature->arg_size_for_bc(code);
int size = declared_signature->return_type()->size();
depth = size - inputs;
}
@@ -1178,7 +1179,7 @@
Node *chk = NULL;
switch(type) {
case T_LONG : chk = new (C) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
- case T_INT : chk = new (C) CmpINode( value, _gvn.intcon(0)); break;
+ case T_INT : chk = new (C) CmpINode(value, _gvn.intcon(0)); break;
case T_ARRAY : // fall through
type = T_OBJECT; // simplify further tests
case T_OBJECT : {
@@ -1229,7 +1230,8 @@
break;
}
- default : ShouldNotReachHere();
+ default:
+ fatal(err_msg_res("unexpected type: %s", type2name(type)));
}
assert(chk != NULL, "sanity check");
chk = _gvn.transform(chk);
@@ -1809,7 +1811,7 @@
}
// Disconnect the call from the graph
- call->disconnect_inputs(NULL);
+ call->disconnect_inputs(NULL, C);
C->gvn_replace_by(call, C->top());
// Clean up any MergeMems that feed other MergeMems since the
@@ -1861,15 +1863,17 @@
// occurs here, the runtime will make sure an MDO exists. There is
// no need to call method()->ensure_method_data() at this point.
+ // Set the stack pointer to the right value for reexecution:
+ set_sp(reexecute_sp());
+
#ifdef ASSERT
if (!must_throw) {
// Make sure the stack has at least enough depth to execute
// the current bytecode.
- int inputs, ignore;
- if (compute_stack_effects(inputs, ignore)) {
- assert(sp() >= inputs, "must have enough JVMS stack to execute");
- // It is a frequent error in library_call.cpp to issue an
- // uncommon trap with the _sp value already popped.
+ int inputs, ignored_depth;
+ if (compute_stack_effects(inputs, ignored_depth)) {
+ assert(sp() >= inputs, err_msg_res("must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
+ Bytecodes::name(java_bc()), sp(), inputs));
}
}
#endif
@@ -1900,7 +1904,8 @@
case Deoptimization::Action_make_not_compilable:
break;
default:
- assert(false, "bad action");
+ fatal(err_msg_res("unknown action %d: %s", action, Deoptimization::trap_action_name(action)));
+ break;
#endif
}
@@ -2667,7 +2672,7 @@
case SSC_always_false:
// It needs a null check because a null will *pass* the cast check.
// A non-null value will always produce an exception.
- return do_null_assert(obj, T_OBJECT);
+ return null_assert(obj);
}
}
}
@@ -2786,7 +2791,7 @@
mb->init_req(TypeFunc::Control, control());
mb->init_req(TypeFunc::Memory, reset_memory());
Node* membar = _gvn.transform(mb);
- set_control(_gvn.transform(new (C) ProjNode(membar,TypeFunc::Control) ));
+ set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
set_all_memory_call(membar);
return membar;
}
@@ -3148,7 +3153,7 @@
Node* cmp_lh = _gvn.transform( new(C) CmpINode(layout_val, intcon(layout_con)) );
Node* bol_lh = _gvn.transform( new(C) BoolNode(cmp_lh, BoolTest::eq) );
{ BuildCutout unless(this, bol_lh, PROB_MAX);
- _sp += nargs;
+ inc_sp(nargs);
uncommon_trap(Deoptimization::Reason_class_check,
Deoptimization::Action_maybe_recompile);
}
@@ -3391,7 +3396,7 @@
{
PreserveJVMState pjvms(this);
set_control(iffalse);
- _sp += nargs;
+ inc_sp(nargs);
uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
}
Node* iftrue = _gvn.transform(new (C) IfTrueNode(iff));
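For invoke bytecodes, compute_stack_effects() now derives the stack effect directly from the declared signature: inputs is the argument slot count for the bytecode (receiver included unless static), and depth is the return-value size minus those inputs. A minimal sketch of that arithmetic follows, with made-up stand-ins (ToySignature and stack_effect_for_invoke are illustrative, not the ci* API):

// Illustrative sketch only: ToySignature stands in for the declared signature.
// It models inputs = argument slots (+1 receiver slot unless static) and
// depth = return slots - inputs, as compute_stack_effects() computes them.
#include <cassert>

struct ToySignature {
  int arg_slots;      // slots for declared arguments (long/double count as 2)
  int return_slots;   // 0 for void, 1 for int/ref, 2 for long/double
};

static void stack_effect_for_invoke(const ToySignature& sig, bool is_static,
                                    int& inputs, int& depth) {
  inputs = sig.arg_slots + (is_static ? 0 : 1);  // receiver occupies one slot
  depth  = sig.return_slots - inputs;            // net change in stack height
}

int main() {
  // long f(int, double) invoked virtually: 1 receiver + 1 int + 2 double = 4 slots in,
  // 2 slots pushed back, so the stack shrinks by 2.
  ToySignature sig{3, 2};
  int inputs, depth;
  stack_effect_for_invoke(sig, /*is_static=*/false, inputs, depth);
  assert(inputs == 4 && depth == -2);
  return 0;
}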
--- a/hotspot/src/share/vm/opto/graphKit.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/graphKit.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -41,6 +41,7 @@
class FastLockNode;
class FastUnlockNode;
class IdealKit;
+class LibraryCallKit;
class Parse;
class RootNode;
@@ -60,11 +61,13 @@
PhaseGVN &_gvn; // Some optimizations while parsing
SafePointNode* _map; // Parser map from JVM to Nodes
SafePointNode* _exceptions;// Parser map(s) for exception state(s)
- int _sp; // JVM Expression Stack Pointer
int _bci; // JVM Bytecode Pointer
ciMethod* _method; // JVM Current Method
private:
+ int _sp; // JVM Expression Stack Pointer; don't modify directly!
+
+ private:
SafePointNode* map_not_null() const {
assert(_map != NULL, "must call stopped() to test for reset compiler map");
return _map;
@@ -80,7 +83,8 @@
}
#endif
- virtual Parse* is_Parse() const { return NULL; }
+ virtual Parse* is_Parse() const { return NULL; }
+ virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
ciEnv* env() const { return _env; }
PhaseGVN& gvn() const { return _gvn; }
@@ -141,7 +145,7 @@
_bci = jvms->bci();
_method = jvms->has_method() ? jvms->method() : NULL; }
void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
- void set_sp(int i) { assert(i >= 0, "must be non-negative"); _sp = i; }
+ void set_sp(int sp) { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
void clean_stack(int from_sp); // clear garbage beyond from_sp to top
void inc_sp(int i) { set_sp(sp() + i); }
@@ -149,7 +153,9 @@
void set_bci(int bci) { _bci = bci; }
// Make sure jvms has current bci & sp.
- JVMState* sync_jvms() const;
+ JVMState* sync_jvms() const;
+ JVMState* sync_jvms_for_reexecute();
+
#ifdef ASSERT
// Make sure JVMS has an updated copy of bci and sp.
// Also sanity-check method, depth, and monitor depth.
@@ -286,7 +292,7 @@
// How many stack inputs does the current BC consume?
// And, how does the stack change after the bytecode?
// Returns false if unknown.
- bool compute_stack_effects(int& inputs, int& depth, bool for_parse = false);
+ bool compute_stack_effects(int& inputs, int& depth);
// Add a fixed offset to a pointer
Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
@@ -337,20 +343,37 @@
Node* load_object_klass(Node* object);
// Find out the length of an array.
Node* load_array_length(Node* array);
+
+
// Helper function to do a NULL pointer check or ZERO check based on type.
- Node* null_check_common(Node* value, BasicType type,
- bool assert_null, Node* *null_control);
// Throw an exception if a given value is null.
// Return the value cast to not-null.
// Be clever about equivalent dominating null checks.
- Node* do_null_check(Node* value, BasicType type) {
- return null_check_common(value, type, false, NULL);
+ Node* null_check_common(Node* value, BasicType type,
+ bool assert_null = false, Node* *null_control = NULL);
+ Node* null_check(Node* value, BasicType type = T_OBJECT) {
+ return null_check_common(value, type);
+ }
+ Node* null_check_receiver() {
+ assert(argument(0)->bottom_type()->isa_ptr(), "must be");
+ return null_check(argument(0));
+ }
+ Node* zero_check_int(Node* value) {
+ assert(value->bottom_type()->basic_type() == T_INT,
+ err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
+ return null_check_common(value, T_INT);
+ }
+ Node* zero_check_long(Node* value) {
+ assert(value->bottom_type()->basic_type() == T_LONG,
+ err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
+ return null_check_common(value, T_LONG);
}
// Throw an uncommon trap if a given value is __not__ null.
// Return the value cast to null, and be clever about dominating checks.
- Node* do_null_assert(Node* value, BasicType type) {
- return null_check_common(value, type, true, NULL);
+ Node* null_assert(Node* value, BasicType type = T_OBJECT) {
+ return null_check_common(value, type, true);
}
+
// Null check oop. Return null-path control into (*null_control).
// Return a cast-not-null node which depends on the not-null control.
// If never_see_null, use an uncommon trap (*null_control sees a top).
@@ -371,9 +394,9 @@
// Replace all occurrences of one node by another.
void replace_in_map(Node* old, Node* neww);
- void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
- Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp); }
- Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1); }
+ void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++ , n); }
+ Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp ); }
+ Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1 ); }
void push_pair(Node* ldval) {
push(ldval);
@@ -580,19 +603,15 @@
//---------- help for generating calls --------------
- // Do a null check on the receiver, which is in argument(0).
- Node* null_check_receiver(ciMethod* callee) {
+ // Do a null check on the receiver as it would happen before the call to
+ // callee (with all arguments still on the stack).
+ Node* null_check_receiver_before_call(ciMethod* callee) {
assert(!callee->is_static(), "must be a virtual method");
- int nargs = 1 + callee->signature()->size();
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when the primitive is inlined into a method
- // which handles NullPointerExceptions.
- Node* receiver = argument(0);
- _sp += nargs;
- receiver = do_null_check(receiver, T_OBJECT);
- _sp -= nargs;
- return receiver;
+ const int nargs = callee->arg_size();
+ inc_sp(nargs);
+ Node* n = null_check_receiver();
+ dec_sp(nargs);
+ return n;
}
// Fill in argument edges for the call from argument(0), argument(1), ...
@@ -645,6 +664,9 @@
klass, reason_string, must_throw, keep_exact_action);
}
+ // SP when bytecode needs to be reexecuted.
+ virtual int reexecute_sp() { return sp(); }
+
// Report if there were too many traps at the current method and bci.
// Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
// If there is no MDO at all, report no trap unless told to assume it.
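null_check_receiver_before_call() and reexecute_sp() share one idea: before emitting a trap point, the expression stack pointer is temporarily raised so the recorded JVM state still contains the call's arguments, then lowered again. A minimal sketch of that raise/record/restore bracket follows, with a made-up stand-in (ToyKit is illustrative, not GraphKit):

// Illustrative sketch only: ToyKit stands in for GraphKit. It shows the
// inc_sp(nargs) / do work / dec_sp(nargs) bracket used so that any deopt
// state captured in between still "sees" the arguments on the stack.
#include <cassert>
#include <cstdio>

struct ToyKit {
  int _sp = 0;
  int sp() const { return _sp; }
  void set_sp(int sp) { assert(sp >= 0); _sp = sp; }
  void inc_sp(int i) { set_sp(sp() + i); }
  void dec_sp(int i) { set_sp(sp() - i); }

  // Pretend trap point: the JVM state recorded here uses the current sp.
  int record_trap_state() const { return sp(); }

  // Arguments were already popped into the call, so logically push them
  // back around the check, then restore the call's own view.
  int null_check_receiver_before_call(int nargs) {
    inc_sp(nargs);                     // arguments appear on-stack again
    int recorded_sp = record_trap_state();
    dec_sp(nargs);
    return recorded_sp;
  }
};

int main() {
  ToyKit kit;
  kit.set_sp(0);                       // caller already popped 3 argument slots
  int recorded = kit.null_check_receiver_before_call(3);
  std::printf("sp at trap=%d, sp after=%d\n", recorded, kit.sp());
  assert(recorded == 3 && kit.sp() == 0);
  return 0;
}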
--- a/hotspot/src/share/vm/opto/ifg.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/ifg.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -573,7 +573,7 @@
(n2lidx(def) && !liveout.member(n2lidx(def)) ) ) {
b->_nodes.remove(j - 1);
if( lrgs(r)._def == n ) lrgs(r)._def = 0;
- n->disconnect_inputs(NULL);
+ n->disconnect_inputs(NULL, C);
_cfg._bbs.map(n->_idx,NULL);
n->replace_by(C->top());
// Since yanking a Node from block, high pressure moves up one
--- a/hotspot/src/share/vm/opto/lcm.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/lcm.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1006,7 +1006,7 @@
//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch.
-void Block::call_catch_cleanup(Block_Array &bbs) {
+void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
// End of region to clone
uint end = end_idx();
@@ -1068,7 +1068,7 @@
// Remove the now-dead cloned ops
for(uint i3 = beg; i3 < end; i3++ ) {
- _nodes[beg]->disconnect_inputs(NULL);
+ _nodes[beg]->disconnect_inputs(NULL, C);
_nodes.remove(beg);
}
@@ -1081,7 +1081,7 @@
Node *n = sb->_nodes[j];
if (n->outcnt() == 0 &&
(!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
- n->disconnect_inputs(NULL);
+ n->disconnect_inputs(NULL, C);
sb->_nodes.remove(j);
new_cnt--;
}
--- a/hotspot/src/share/vm/opto/library_call.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/library_call.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -67,30 +67,64 @@
// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
private:
- LibraryIntrinsic* _intrinsic; // the library intrinsic being called
+ LibraryIntrinsic* _intrinsic; // the library intrinsic being called
+ Node* _result; // the result node, if any
+ int _reexecute_sp; // the stack pointer when bytecode needs to be reexecuted
const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
public:
- LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
- : GraphKit(caller),
- _intrinsic(intrinsic)
+ LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
+ : GraphKit(jvms),
+ _intrinsic(intrinsic),
+ _result(NULL)
{
+ // Check if this is a root compile. In that case we don't have a caller.
+ if (!jvms->has_method()) {
+ _reexecute_sp = sp();
+ } else {
+ // Find out how many arguments the interpreter needs when deoptimizing
+ // and save the stack pointer value so it can be used by uncommon_trap.
+ // We find the argument count by looking at the declared signature.
+ bool ignored_will_link;
+ ciSignature* declared_signature = NULL;
+ ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
+ const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
+ _reexecute_sp = sp() + nargs; // "push" arguments back on stack
+ }
}
+ virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }
+
ciMethod* caller() const { return jvms()->method(); }
int bci() const { return jvms()->bci(); }
LibraryIntrinsic* intrinsic() const { return _intrinsic; }
vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
ciMethod* callee() const { return _intrinsic->method(); }
- ciSignature* signature() const { return callee()->signature(); }
- int arg_size() const { return callee()->arg_size(); }
bool try_to_inline();
Node* try_to_predicate();
+ void push_result() {
+ // Push the result onto the stack.
+ if (!stopped() && result() != NULL) {
+ BasicType bt = result()->bottom_type()->basic_type();
+ push_node(bt, result());
+ }
+ }
+
+ private:
+ void fatal_unexpected_iid(vmIntrinsics::ID iid) {
+ fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+ }
+
+ void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
+ void set_result(RegionNode* region, PhiNode* value);
+ Node* result() { return _result; }
+
+ virtual int reexecute_sp() { return _reexecute_sp; }
+
// Helper functions to inline natives
- void push_result(RegionNode* region, PhiNode* value);
Node* generate_guard(Node* test, RegionNode* region, float true_prob);
Node* generate_slow_guard(Node* test, RegionNode* region);
Node* generate_fair_guard(Node* test, RegionNode* region);
@@ -108,21 +142,19 @@
bool disjoint_bases, const char* &name, bool dest_uninitialized);
Node* load_mirror_from_klass(Node* klass);
Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
- int nargs,
RegionNode* region, int null_path,
int offset);
- Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs,
+ Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
RegionNode* region, int null_path) {
int offset = java_lang_Class::klass_offset_in_bytes();
- return load_klass_from_mirror_common(mirror, never_see_null, nargs,
+ return load_klass_from_mirror_common(mirror, never_see_null,
region, null_path,
offset);
}
Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
- int nargs,
RegionNode* region, int null_path) {
int offset = java_lang_Class::array_klass_offset_in_bytes();
- return load_klass_from_mirror_common(mirror, never_see_null, nargs,
+ return load_klass_from_mirror_common(mirror, never_see_null,
region, null_path,
offset);
}
@@ -161,16 +193,14 @@
bool inline_string_indexOf();
Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
bool inline_string_equals();
- Node* pop_math_arg();
+ Node* round_double_node(Node* n);
bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
bool inline_math_native(vmIntrinsics::ID id);
bool inline_trig(vmIntrinsics::ID id);
- bool inline_trans(vmIntrinsics::ID id);
- bool inline_abs(vmIntrinsics::ID id);
- bool inline_sqrt(vmIntrinsics::ID id);
+ bool inline_math(vmIntrinsics::ID id);
+ bool inline_exp();
+ bool inline_pow();
void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
- bool inline_pow(vmIntrinsics::ID id);
- bool inline_exp(vmIntrinsics::ID id);
bool inline_min_max(vmIntrinsics::ID id);
Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
// This returns Type::AnyPtr, RawPtr, or OopPtr.
@@ -179,7 +209,7 @@
// Helper for inline_unsafe_access.
// Generates the guards that check whether the result of
// Unsafe.getObject should be recorded in an SATB log buffer.
- void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, int nargs, bool need_mem_bar);
+ void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
bool inline_unsafe_allocate();
@@ -253,11 +283,7 @@
bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
bool inline_unsafe_ordered_store(BasicType type);
bool inline_fp_conversions(vmIntrinsics::ID id);
- bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
- bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
- bool inline_bitCount(vmIntrinsics::ID id);
- bool inline_reverseBytes(vmIntrinsics::ID id);
-
+ bool inline_number_methods(vmIntrinsics::ID id);
bool inline_reference_get();
bool inline_aescrypt_Block(vmIntrinsics::ID id);
bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
@@ -321,15 +347,18 @@
switch (id) {
case vmIntrinsics::_compareTo:
if (!SpecialStringCompareTo) return NULL;
+ if (!Matcher::match_rule_supported(Op_StrComp)) return NULL;
break;
case vmIntrinsics::_indexOf:
if (!SpecialStringIndexOf) return NULL;
break;
case vmIntrinsics::_equals:
if (!SpecialStringEquals) return NULL;
+ if (!Matcher::match_rule_supported(Op_StrEquals)) return NULL;
break;
case vmIntrinsics::_equalsC:
if (!SpecialArraysEquals) return NULL;
+ if (!Matcher::match_rule_supported(Op_AryEq)) return NULL;
break;
case vmIntrinsics::_arraycopy:
if (!InlineArrayCopy) return NULL;
@@ -382,6 +411,19 @@
if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
break;
+ case vmIntrinsics::_reverseBytes_c:
+ if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
+ break;
+ case vmIntrinsics::_reverseBytes_s:
+ if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return NULL;
+ break;
+ case vmIntrinsics::_reverseBytes_i:
+ if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return NULL;
+ break;
+ case vmIntrinsics::_reverseBytes_l:
+ if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return NULL;
+ break;
+
case vmIntrinsics::_Reference_get:
// Use the intrinsic version of Reference.get() so that the value in
// the referent field can be registered by the G1 pre-barrier code.
@@ -488,10 +530,13 @@
tty->print_cr("Intrinsic %s", str);
}
#endif
-
+ ciMethod* callee = kit.callee();
+ const int bci = kit.bci();
+
+ // Try to inline the intrinsic.
if (kit.try_to_inline()) {
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
- CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+ CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
if (C->log()) {
@@ -500,6 +545,8 @@
(is_virtual() ? " virtual='1'" : ""),
C->unique() - nodes);
}
+ // Push the result from the inlined method onto the stack.
+ kit.push_result();
return kit.transfer_exceptions_into_jvms();
}
@@ -508,12 +555,12 @@
if (jvms->has_method()) {
// Not a root compile.
const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
- CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg);
+ CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg);
} else {
// Root compile
tty->print("Did not generate intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
- (is_virtual() ? " (virtual)" : ""), kit.bci());
+ (is_virtual() ? " (virtual)" : ""), bci);
}
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
@@ -532,9 +579,15 @@
tty->print_cr("Predicate for intrinsic %s", str);
}
#endif
+ ciMethod* callee = kit.callee();
+ const int bci = kit.bci();
Node* slow_ctl = kit.try_to_predicate();
if (!kit.failing()) {
+ if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+ }
+ C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
if (C->log()) {
C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
vmIntrinsics::name_at(intrinsic_id()),
@@ -549,12 +602,12 @@
if (jvms->has_method()) {
// Not a root compile.
const char* msg = "failed to generate predicate for intrinsic";
- CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg);
+ CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
} else {
// Root compile
tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
vmIntrinsics::name_at(intrinsic_id()),
- (is_virtual() ? " (virtual)" : ""), kit.bci());
+ (is_virtual() ? " (virtual)" : ""), bci);
}
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
@@ -566,6 +619,7 @@
const bool is_store = true;
const bool is_native_ptr = true;
const bool is_static = true;
+ const bool is_volatile = true;
if (!jvms()->has_method()) {
// Root JVMState has a null method.
@@ -575,13 +629,11 @@
}
assert(merged_memory(), "");
+
switch (intrinsic_id()) {
- case vmIntrinsics::_hashCode:
- return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
- case vmIntrinsics::_identityHashCode:
- return inline_native_hashcode(/*!virtual*/ false, is_static);
- case vmIntrinsics::_getClass:
- return inline_native_getClass();
+ case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
+ case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
+ case vmIntrinsics::_getClass: return inline_native_getClass();
case vmIntrinsics::_dsin:
case vmIntrinsics::_dcos:
@@ -592,203 +644,114 @@
case vmIntrinsics::_dexp:
case vmIntrinsics::_dlog:
case vmIntrinsics::_dlog10:
- case vmIntrinsics::_dpow:
- return inline_math_native(intrinsic_id());
+ case vmIntrinsics::_dpow: return inline_math_native(intrinsic_id());
case vmIntrinsics::_min:
- case vmIntrinsics::_max:
- return inline_min_max(intrinsic_id());
-
- case vmIntrinsics::_arraycopy:
- return inline_arraycopy();
-
- case vmIntrinsics::_compareTo:
- return inline_string_compareTo();
- case vmIntrinsics::_indexOf:
- return inline_string_indexOf();
- case vmIntrinsics::_equals:
- return inline_string_equals();
-
- case vmIntrinsics::_getObject:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false);
- case vmIntrinsics::_getBoolean:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false);
- case vmIntrinsics::_getByte:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false);
- case vmIntrinsics::_getShort:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false);
- case vmIntrinsics::_getChar:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false);
- case vmIntrinsics::_getInt:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false);
- case vmIntrinsics::_getLong:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false);
- case vmIntrinsics::_getFloat:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false);
- case vmIntrinsics::_getDouble:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false);
-
- case vmIntrinsics::_putObject:
- return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false);
- case vmIntrinsics::_putBoolean:
- return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false);
- case vmIntrinsics::_putByte:
- return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false);
- case vmIntrinsics::_putShort:
- return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false);
- case vmIntrinsics::_putChar:
- return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false);
- case vmIntrinsics::_putInt:
- return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false);
- case vmIntrinsics::_putLong:
- return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false);
- case vmIntrinsics::_putFloat:
- return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false);
- case vmIntrinsics::_putDouble:
- return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false);
-
- case vmIntrinsics::_getByte_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false);
- case vmIntrinsics::_getShort_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false);
- case vmIntrinsics::_getChar_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false);
- case vmIntrinsics::_getInt_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false);
- case vmIntrinsics::_getLong_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false);
- case vmIntrinsics::_getFloat_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false);
- case vmIntrinsics::_getDouble_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false);
- case vmIntrinsics::_getAddress_raw:
- return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false);
-
- case vmIntrinsics::_putByte_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false);
- case vmIntrinsics::_putShort_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false);
- case vmIntrinsics::_putChar_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false);
- case vmIntrinsics::_putInt_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_INT, false);
- case vmIntrinsics::_putLong_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false);
- case vmIntrinsics::_putFloat_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false);
- case vmIntrinsics::_putDouble_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false);
- case vmIntrinsics::_putAddress_raw:
- return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false);
-
- case vmIntrinsics::_getObjectVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true);
- case vmIntrinsics::_getBooleanVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true);
- case vmIntrinsics::_getByteVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true);
- case vmIntrinsics::_getShortVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true);
- case vmIntrinsics::_getCharVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true);
- case vmIntrinsics::_getIntVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true);
- case vmIntrinsics::_getLongVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true);
- case vmIntrinsics::_getFloatVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true);
- case vmIntrinsics::_getDoubleVolatile:
- return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true);
-
- case vmIntrinsics::_putObjectVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true);
- case vmIntrinsics::_putBooleanVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true);
- case vmIntrinsics::_putByteVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true);
- case vmIntrinsics::_putShortVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true);
- case vmIntrinsics::_putCharVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
- case vmIntrinsics::_putIntVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
- case vmIntrinsics::_putLongVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
- case vmIntrinsics::_putFloatVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
- case vmIntrinsics::_putDoubleVolatile:
- return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);
-
- case vmIntrinsics::_prefetchRead:
- return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
- case vmIntrinsics::_prefetchWrite:
- return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
- case vmIntrinsics::_prefetchReadStatic:
- return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
- case vmIntrinsics::_prefetchWriteStatic:
- return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
-
- case vmIntrinsics::_compareAndSwapObject:
- return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
- case vmIntrinsics::_compareAndSwapInt:
- return inline_unsafe_load_store(T_INT, LS_cmpxchg);
- case vmIntrinsics::_compareAndSwapLong:
- return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
-
- case vmIntrinsics::_putOrderedObject:
- return inline_unsafe_ordered_store(T_OBJECT);
- case vmIntrinsics::_putOrderedInt:
- return inline_unsafe_ordered_store(T_INT);
- case vmIntrinsics::_putOrderedLong:
- return inline_unsafe_ordered_store(T_LONG);
-
- case vmIntrinsics::_getAndAddInt:
- return inline_unsafe_load_store(T_INT, LS_xadd);
- case vmIntrinsics::_getAndAddLong:
- return inline_unsafe_load_store(T_LONG, LS_xadd);
- case vmIntrinsics::_getAndSetInt:
- return inline_unsafe_load_store(T_INT, LS_xchg);
- case vmIntrinsics::_getAndSetLong:
- return inline_unsafe_load_store(T_LONG, LS_xchg);
- case vmIntrinsics::_getAndSetObject:
- return inline_unsafe_load_store(T_OBJECT, LS_xchg);
-
- case vmIntrinsics::_currentThread:
- return inline_native_currentThread();
- case vmIntrinsics::_isInterrupted:
- return inline_native_isInterrupted();
+ case vmIntrinsics::_max: return inline_min_max(intrinsic_id());
+
+ case vmIntrinsics::_arraycopy: return inline_arraycopy();
+
+ case vmIntrinsics::_compareTo: return inline_string_compareTo();
+ case vmIntrinsics::_indexOf: return inline_string_indexOf();
+ case vmIntrinsics::_equals: return inline_string_equals();
+
+ case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile);
+ case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
+ case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile);
+ case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile);
+ case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile);
+ case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile);
+ case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile);
+ case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile);
+ case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
+
+ case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile);
+ case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile);
+ case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile);
+ case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile);
+ case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile);
+ case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile);
+ case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile);
+ case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile);
+ case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile);
+
+ case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile);
+ case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile);
+ case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile);
+ case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile);
+ case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile);
+ case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile);
+ case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
+ case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);
+
+ case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile);
+ case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile);
+ case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile);
+ case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile);
+ case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile);
+ case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile);
+ case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile);
+ case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile);
+
+ case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile);
+ case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile);
+ case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile);
+ case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile);
+ case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile);
+ case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile);
+ case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile);
+ case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile);
+ case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile);
+
+ case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile);
+ case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile);
+ case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile);
+ case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile);
+ case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile);
+ case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile);
+ case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile);
+ case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile);
+ case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile);
+
+ case vmIntrinsics::_prefetchRead: return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
+ case vmIntrinsics::_prefetchWrite: return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
+ case vmIntrinsics::_prefetchReadStatic: return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
+ case vmIntrinsics::_prefetchWriteStatic: return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
+
+ case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
+ case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmpxchg);
+ case vmIntrinsics::_compareAndSwapLong: return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
+
+ case vmIntrinsics::_putOrderedObject: return inline_unsafe_ordered_store(T_OBJECT);
+ case vmIntrinsics::_putOrderedInt: return inline_unsafe_ordered_store(T_INT);
+ case vmIntrinsics::_putOrderedLong: return inline_unsafe_ordered_store(T_LONG);
+
+ case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_xadd);
+ case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_xadd);
+ case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_xchg);
+ case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_xchg);
+ case vmIntrinsics::_getAndSetObject: return inline_unsafe_load_store(T_OBJECT, LS_xchg);
+
+ case vmIntrinsics::_currentThread: return inline_native_currentThread();
+ case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted();
#ifdef TRACE_HAVE_INTRINSICS
- case vmIntrinsics::_classID:
- return inline_native_classID();
- case vmIntrinsics::_threadID:
- return inline_native_threadID();
- case vmIntrinsics::_counterTime:
- return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
+ case vmIntrinsics::_classID: return inline_native_classID();
+ case vmIntrinsics::_threadID: return inline_native_threadID();
+ case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
#endif
- case vmIntrinsics::_currentTimeMillis:
- return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
- case vmIntrinsics::_nanoTime:
- return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
- case vmIntrinsics::_allocateInstance:
- return inline_unsafe_allocate();
- case vmIntrinsics::_copyMemory:
- return inline_unsafe_copyMemory();
- case vmIntrinsics::_newArray:
- return inline_native_newArray();
- case vmIntrinsics::_getLength:
- return inline_native_getLength();
- case vmIntrinsics::_copyOf:
- return inline_array_copyOf(false);
- case vmIntrinsics::_copyOfRange:
- return inline_array_copyOf(true);
- case vmIntrinsics::_equalsC:
- return inline_array_equals();
- case vmIntrinsics::_clone:
- return inline_native_clone(intrinsic()->is_virtual());
-
- case vmIntrinsics::_isAssignableFrom:
- return inline_native_subtype_check();
+ case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
+ case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
+ case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
+ case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
+ case vmIntrinsics::_newArray: return inline_native_newArray();
+ case vmIntrinsics::_getLength: return inline_native_getLength();
+ case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
+ case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
+ case vmIntrinsics::_equalsC: return inline_array_equals();
+ case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
+
+ case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
case vmIntrinsics::_isInstance:
case vmIntrinsics::_getModifiers:
@@ -797,44 +760,32 @@
case vmIntrinsics::_isPrimitive:
case vmIntrinsics::_getSuperclass:
case vmIntrinsics::_getComponentType:
- case vmIntrinsics::_getClassAccessFlags:
- return inline_native_Class_query(intrinsic_id());
+ case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
case vmIntrinsics::_floatToRawIntBits:
case vmIntrinsics::_floatToIntBits:
case vmIntrinsics::_intBitsToFloat:
case vmIntrinsics::_doubleToRawLongBits:
case vmIntrinsics::_doubleToLongBits:
- case vmIntrinsics::_longBitsToDouble:
- return inline_fp_conversions(intrinsic_id());
+ case vmIntrinsics::_longBitsToDouble: return inline_fp_conversions(intrinsic_id());
case vmIntrinsics::_numberOfLeadingZeros_i:
case vmIntrinsics::_numberOfLeadingZeros_l:
- return inline_numberOfLeadingZeros(intrinsic_id());
-
case vmIntrinsics::_numberOfTrailingZeros_i:
case vmIntrinsics::_numberOfTrailingZeros_l:
- return inline_numberOfTrailingZeros(intrinsic_id());
-
case vmIntrinsics::_bitCount_i:
case vmIntrinsics::_bitCount_l:
- return inline_bitCount(intrinsic_id());
-
case vmIntrinsics::_reverseBytes_i:
case vmIntrinsics::_reverseBytes_l:
case vmIntrinsics::_reverseBytes_s:
- case vmIntrinsics::_reverseBytes_c:
- return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
-
- case vmIntrinsics::_getCallerClass:
- return inline_native_Reflection_getCallerClass();
-
- case vmIntrinsics::_Reference_get:
- return inline_reference_get();
+ case vmIntrinsics::_reverseBytes_c: return inline_number_methods(intrinsic_id());
+
+ case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass();
+
+ case vmIntrinsics::_Reference_get: return inline_reference_get();
case vmIntrinsics::_aescrypt_encryptBlock:
- case vmIntrinsics::_aescrypt_decryptBlock:
- return inline_aescrypt_Block(intrinsic_id());
+ case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());
case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
@@ -883,13 +834,13 @@
}
}
-//------------------------------push_result------------------------------
+//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
-void LibraryCallKit::push_result(RegionNode* region, PhiNode* value) {
+void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
record_for_igvn(region);
set_control(_gvn.transform(region));
- BasicType value_type = value->type()->basic_type();
- push_node(value_type, _gvn.transform(value));
+ set_result( _gvn.transform(value));
+ assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}
//------------------------------generate_guard---------------------------
@@ -1078,7 +1029,6 @@
 // to Int nodes containing the lengths of str1 and str2.
//
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
-
Node* result = NULL;
switch (opcode) {
case Op_StrIndexOf:
@@ -1105,51 +1055,23 @@
}
//------------------------------inline_string_compareTo------------------------
+// public int java.lang.String.compareTo(String anotherString);
bool LibraryCallKit::inline_string_compareTo() {
-
- if (!Matcher::has_match_rule(Op_StrComp)) return false;
-
- _sp += 2;
- Node *argument = pop(); // pop non-receiver first: it was pushed second
- Node *receiver = pop();
-
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when string compare is inlined into a method
- // which handles NullPointerExceptions.
- _sp += 2;
- receiver = do_null_check(receiver, T_OBJECT);
- argument = do_null_check(argument, T_OBJECT);
- _sp -= 2;
+ Node* receiver = null_check(argument(0));
+ Node* arg = null_check(argument(1));
if (stopped()) {
return true;
}
-
- Node* compare = make_string_method_node(Op_StrComp, receiver, argument);
- push(compare);
+ set_result(make_string_method_node(Op_StrComp, receiver, arg));
return true;
}
//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals() {
-
- if (!Matcher::has_match_rule(Op_StrEquals)) return false;
-
- int nargs = 2;
- _sp += nargs;
- Node* argument = pop(); // pop non-receiver first: it was pushed second
- Node* receiver = pop();
-
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when string compare is inlined into a method
- // which handles NullPointerExceptions.
- _sp += nargs;
- receiver = do_null_check(receiver, T_OBJECT);
- //should not do null check for argument for String.equals(), because spec
- //allows to specify NULL as argument.
- _sp -= nargs;
-
+ Node* receiver = null_check_receiver();
+  // NOTE: Do not null check the argument for String.equals() because the spec
+  // allows NULL as an argument.
+ Node* argument = this->argument(1);
if (stopped()) {
return true;
}
@@ -1173,9 +1095,7 @@
ciInstanceKlass* klass = env()->String_klass();
if (!stopped()) {
- _sp += nargs; // gen_instanceof might do an uncommon trap
Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
- _sp -= nargs;
Node* cmp = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
@@ -1207,7 +1127,7 @@
Node* receiver_cnt = load_String_length(no_ctrl, receiver);
// Get start addr of argument
- Node* argument_val = load_String_value(no_ctrl, argument);
+ Node* argument_val = load_String_value(no_ctrl, argument);
Node* argument_offset = load_String_offset(no_ctrl, argument);
Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
@@ -1236,24 +1156,15 @@
set_control(_gvn.transform(region));
record_for_igvn(region);
- push(_gvn.transform(phi));
-
+ set_result(_gvn.transform(phi));
return true;
}
//------------------------------inline_array_equals----------------------------
bool LibraryCallKit::inline_array_equals() {
-
- if (!Matcher::has_match_rule(Op_AryEq)) return false;
-
- _sp += 2;
- Node *argument2 = pop();
- Node *argument1 = pop();
-
- Node* equals =
- _gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS),
- argument1, argument2) );
- push(equals);
+ Node* arg1 = argument(0);
+ Node* arg2 = argument(1);
+ set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
return true;
}
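For reference, the pair of char[] arguments folded into a single AryEqNode here corresponds, going by the intrinsic's name _equalsC, to java.util.Arrays.equals(char[], char[]) at the Java level. A minimal, hypothetical sketch of that contract (plain JDK, class name invented):

    import java.util.Arrays;

    public class ArrayEqualsSketch {
        public static void main(String[] args) {
            char[] a = {'f', 'o', 'o'};
            char[] b = {'f', 'o', 'o'};
            System.out.println(Arrays.equals(a, b)); // true: element-wise comparison
            System.out.println(a.equals(b));         // false: Object.equals is identity only
        }
    }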
@@ -1325,7 +1236,7 @@
float likely = PROB_LIKELY(0.9);
float unlikely = PROB_UNLIKELY(0.9);
- const int nargs = 2; // number of arguments to push back for uncommon trap in predicate
+ const int nargs = 0; // no arguments to push back for uncommon trap in predicate
Node* source = load_String_value(no_ctrl, string_object);
Node* sourceOffset = load_String_offset(no_ctrl, string_object);
@@ -1396,10 +1307,8 @@
//------------------------------inline_string_indexOf------------------------
bool LibraryCallKit::inline_string_indexOf() {
-
- _sp += 2;
- Node *argument = pop(); // pop non-receiver first: it was pushed second
- Node *receiver = pop();
+ Node* receiver = argument(0);
+ Node* arg = argument(1);
Node* result;
// Disable the use of pcmpestri until it can be guaranteed that
@@ -1409,15 +1318,8 @@
// Generate SSE4.2 version of indexOf
// We currently only have match rules that use SSE4.2
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when string compare is inlined into a method
- // which handles NullPointerExceptions.
- _sp += 2;
- receiver = do_null_check(receiver, T_OBJECT);
- argument = do_null_check(argument, T_OBJECT);
- _sp -= 2;
-
+ receiver = null_check(receiver);
+ arg = null_check(arg);
if (stopped()) {
return true;
}
@@ -1439,12 +1341,12 @@
Node* source_cnt = load_String_length(no_ctrl, receiver);
// Get start addr of substring
- Node* substr = load_String_value(no_ctrl, argument);
- Node* substr_offset = load_String_offset(no_ctrl, argument);
+ Node* substr = load_String_value(no_ctrl, arg);
+ Node* substr_offset = load_String_offset(no_ctrl, arg);
Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);
// Get length of source string
- Node* substr_cnt = load_String_length(no_ctrl, argument);
+ Node* substr_cnt = load_String_length(no_ctrl, arg);
// Check for substr count > string count
Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) );
@@ -1477,10 +1379,10 @@
} else { // Use LibraryCallKit::string_indexOf
// don't intrinsify if argument isn't a constant string.
- if (!argument->is_Con()) {
+ if (!arg->is_Con()) {
return false;
}
- const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr();
+ const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr();
if (str_type == NULL) {
return false;
}
@@ -1511,21 +1413,15 @@
return false;
}
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when string compare is inlined into a method
- // which handles NullPointerExceptions.
- _sp += 2;
- receiver = do_null_check(receiver, T_OBJECT);
- // No null check on the argument is needed since it's a constant String oop.
- _sp -= 2;
+ receiver = null_check(receiver, T_OBJECT);
+ // NOTE: No null check on the argument is needed since it's a constant String oop.
if (stopped()) {
return true;
}
   // An empty string as the pattern always returns 0 (match at beginning of string)
if (c == 0) {
- push(intcon(0));
+ set_result(intcon(0));
return true;
}
@@ -1548,47 +1444,54 @@
result = string_indexOf(receiver, pat, o, cache, md2);
}
-
- push(result);
+ set_result(result);
return true;
}
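The constant-pattern path above relies on the Java-level contract that an empty pattern matches at index 0. A small sketch of that behavior (plain JDK, class name hypothetical):

    public class IndexOfSketch {
        public static void main(String[] args) {
            System.out.println("abc".indexOf(""));     // 0: empty pattern matches at the start
            System.out.println("abcabc".indexOf("b")); // 1: first occurrence
            System.out.println("abc".indexOf("zz"));   // -1: no match
        }
    }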
-//--------------------------pop_math_arg--------------------------------
-// Pop a double argument to a math function from the stack
-// rounding it if necessary.
-Node * LibraryCallKit::pop_math_arg() {
- Node *arg = pop_pair();
- if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 )
- arg = _gvn.transform( new (C) RoundDoubleNode(0, arg) );
- return arg;
+//--------------------------round_double_node--------------------------------
+// Round a double node if necessary.
+Node* LibraryCallKit::round_double_node(Node* n) {
+ if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
+ n = _gvn.transform(new (C) RoundDoubleNode(0, n));
+ return n;
+}
+
+//------------------------------inline_math-----------------------------------
+// public static double Math.abs(double)
+// public static double Math.sqrt(double)
+// public static double Math.log(double)
+// public static double Math.log10(double)
+bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
+ Node* arg = round_double_node(argument(0));
+ Node* n;
+ switch (id) {
+ case vmIntrinsics::_dabs: n = new (C) AbsDNode( arg); break;
+ case vmIntrinsics::_dsqrt: n = new (C) SqrtDNode(0, arg); break;
+ case vmIntrinsics::_dlog: n = new (C) LogDNode( arg); break;
+ case vmIntrinsics::_dlog10: n = new (C) Log10DNode( arg); break;
+ default: fatal_unexpected_iid(id); break;
+ }
+ set_result(_gvn.transform(n));
+ return true;
}
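For reference, these are the Java entry points that reach inline_math when the corresponding match rules (AbsD, SqrtD, LogD, Log10D) are available; a small sketch with the values a standard JDK returns:

    public class MathIntrinsicsSketch {
        public static void main(String[] args) {
            System.out.println(Math.abs(-2.5));     // 2.5  -> AbsDNode
            System.out.println(Math.sqrt(16.0));    // 4.0  -> SqrtDNode
            System.out.println(Math.log(1.0));      // 0.0  -> LogDNode
            System.out.println(Math.log10(1000.0)); // 3.0  -> Log10DNode
        }
    }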
//------------------------------inline_trig----------------------------------
// Inline sin/cos/tan instructions, if possible. If rounding is required, do
// argument reduction which will turn into a fast/slow diamond.
bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
- _sp += arg_size(); // restore stack pointer
- Node* arg = pop_math_arg();
- Node* trig = NULL;
+ Node* arg = round_double_node(argument(0));
+ Node* n = NULL;
switch (id) {
- case vmIntrinsics::_dsin:
- trig = _gvn.transform((Node*)new (C) SinDNode(arg));
- break;
- case vmIntrinsics::_dcos:
- trig = _gvn.transform((Node*)new (C) CosDNode(arg));
- break;
- case vmIntrinsics::_dtan:
- trig = _gvn.transform((Node*)new (C) TanDNode(arg));
- break;
- default:
- assert(false, "bad intrinsic was passed in");
- return false;
+ case vmIntrinsics::_dsin: n = new (C) SinDNode(arg); break;
+ case vmIntrinsics::_dcos: n = new (C) CosDNode(arg); break;
+ case vmIntrinsics::_dtan: n = new (C) TanDNode(arg); break;
+ default: fatal_unexpected_iid(id); break;
}
+ n = _gvn.transform(n);
// Rounding required? Check for argument reduction!
- if( Matcher::strict_fp_requires_explicit_rounding ) {
-
+ if (Matcher::strict_fp_requires_explicit_rounding) {
static const double pi_4 = 0.7853981633974483;
static const double neg_pi_4 = -0.7853981633974483;
// pi/2 in 80-bit extended precision
@@ -1623,8 +1526,8 @@
// probably do the math inside the SIN encoding.
// Make the merge point
- RegionNode *r = new (C) RegionNode(3);
- Node *phi = new (C) PhiNode(r,Type::DOUBLE);
+ RegionNode* r = new (C) RegionNode(3);
+ Node* phi = new (C) PhiNode(r, Type::DOUBLE);
// Flatten arg so we need only 1 test
Node *abs = _gvn.transform(new (C) AbsDNode(arg));
@@ -1639,7 +1542,7 @@
set_control(opt_iff(r,iff));
// Set fast path result
- phi->init_req(2,trig);
+ phi->init_req(2, n);
// Slow path - non-blocking leaf call
Node* call = NULL;
@@ -1661,37 +1564,18 @@
break;
}
assert(control()->in(0) == call, "");
- Node* slow_result = _gvn.transform(new (C) ProjNode(call,TypeFunc::Parms));
- r->init_req(1,control());
- phi->init_req(1,slow_result);
+ Node* slow_result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
+ r->init_req(1, control());
+ phi->init_req(1, slow_result);
// Post-merge
set_control(_gvn.transform(r));
record_for_igvn(r);
- trig = _gvn.transform(phi);
+ n = _gvn.transform(phi);
C->set_has_split_ifs(true); // Has chance for split-if optimization
}
- // Push result back on JVM stack
- push_pair(trig);
- return true;
-}
-
-//------------------------------inline_sqrt-------------------------------------
-// Inline square root instruction, if possible.
-bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_dsqrt, "Not square root");
- _sp += arg_size(); // restore stack pointer
- push_pair(_gvn.transform(new (C) SqrtDNode(0, pop_math_arg())));
- return true;
-}
-
-//------------------------------inline_abs-------------------------------------
-// Inline absolute value instruction, if possible.
-bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_dabs, "Not absolute value");
- _sp += arg_size(); // restore stack pointer
- push_pair(_gvn.transform(new (C) AbsDNode(pop_math_arg())));
+ set_result(n);
return true;
}
@@ -1700,24 +1584,18 @@
//result=(result.isNaN())? funcAddr():result;
// Check: If isNaN() by checking result!=result? then either trap
// or go to runtime
- Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result,result));
+ Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result, result));
// Build the boolean node
- Node* bolisnum = _gvn.transform( new (C) BoolNode(cmpisnan, BoolTest::eq) );
+ Node* bolisnum = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::eq));
if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
- {
- BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
- // End the current control-flow path
- push_pair(x);
- if (y != NULL) {
- push_pair(y);
- }
+ { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
// The pow or exp intrinsic returned a NaN, which requires a call
// to the runtime. Recompile with the runtime call.
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_make_not_entrant);
}
- push_pair(result);
+ set_result(result);
} else {
// If this inlining ever returned NaN in the past, we compile a call
// to the runtime to properly handle corner cases
@@ -1727,7 +1605,7 @@
Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) );
if (!if_slow->is_top()) {
- RegionNode* result_region = new(C) RegionNode(3);
+ RegionNode* result_region = new (C) RegionNode(3);
PhiNode* result_val = new (C) PhiNode(result_region, Type::DOUBLE);
result_region->init_req(1, if_fast);
@@ -1747,9 +1625,9 @@
result_region->init_req(2, control());
result_val->init_req(2, value);
- push_result(result_region, result_val);
+ set_result(result_region, result_val);
} else {
- push_pair(result);
+ set_result(result);
}
}
}
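The result != result test used above is the standard NaN self-comparison idiom, the same test Double.isNaN performs; a small illustration:

    public class NaNCheckSketch {
        public static void main(String[] args) {
            double d = Math.pow(-2.0, 0.5);      // NaN: negative base, non-integer exponent
            System.out.println(d != d);          // true: NaN is the only value unequal to itself
            System.out.println(Double.isNaN(d)); // true: isNaN is implemented as (v != v)
        }
    }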
@@ -1757,25 +1635,19 @@
//------------------------------inline_exp-------------------------------------
// Inline exp instructions, if possible. The Intel hardware only misses
// really odd corner cases (+/- Infinity). Just uncommon-trap them.
-bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_dexp, "Not exp");
-
- _sp += arg_size(); // restore stack pointer
- Node *x = pop_math_arg();
- Node *result = _gvn.transform(new (C) ExpDNode(0,x));
-
- finish_pow_exp(result, x, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+bool LibraryCallKit::inline_exp() {
+ Node* arg = round_double_node(argument(0));
+ Node* n = _gvn.transform(new (C) ExpDNode(0, arg));
+
+ finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
C->set_has_split_ifs(true); // Has chance for split-if optimization
-
return true;
}
//------------------------------inline_pow-------------------------------------
// Inline power instructions, if possible.
-bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_dpow, "Not pow");
-
+bool LibraryCallKit::inline_pow() {
// Pseudocode for pow
// if (x <= 0.0) {
// long longy = (long)y;
@@ -1793,15 +1665,14 @@
// }
// return result;
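A rough, hypothetical Java rendering of the negative-base branch of this pseudocode; it mirrors the (long) cast shown above rather than the full Math.pow specification, and it ignores the +/-0 cases:

    public class PowCornerCaseSketch {
        // Negative base: a real result exists only for integral exponents,
        // and an odd exponent flips the sign.
        static double powNegativeBase(double x, double y) {
            long longy = (long) y;
            if ((double) longy == y) {                // y has an integral value
                double r = Math.pow(Math.abs(x), y);
                return ((longy & 1L) == 1L) ? -r : r; // odd exponent -> negate
            }
            return Double.NaN;                        // non-integer exponent -> NaN
        }

        public static void main(String[] args) {
            System.out.println(powNegativeBase(-2.0, 3.0)); // -8.0
            System.out.println(powNegativeBase(-2.0, 2.5)); // NaN
        }
    }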
- _sp += arg_size(); // restore stack pointer
- Node* y = pop_math_arg();
- Node* x = pop_math_arg();
+ Node* x = round_double_node(argument(0));
+ Node* y = round_double_node(argument(2));
Node* result = NULL;
if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
// Short form: skip the fancy tests and just check for NaN result.
- result = _gvn.transform( new (C) PowDNode(0, x, y) );
+ result = _gvn.transform(new (C) PowDNode(0, x, y));
} else {
// If this inlining ever returned NaN in the past, include all
// checks + call to the runtime.
@@ -1919,55 +1790,23 @@
// Post merge
set_control(_gvn.transform(r));
record_for_igvn(r);
- result=_gvn.transform(phi);
+ result = _gvn.transform(phi);
}
finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
C->set_has_split_ifs(true); // Has chance for split-if optimization
-
- return true;
-}
-
-//------------------------------inline_trans-------------------------------------
-// Inline transcendental instructions, if possible. The Intel hardware gets
-// these right, no funny corner cases missed.
-bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) {
- _sp += arg_size(); // restore stack pointer
- Node* arg = pop_math_arg();
- Node* trans = NULL;
-
- switch (id) {
- case vmIntrinsics::_dlog:
- trans = _gvn.transform((Node*)new (C) LogDNode(arg));
- break;
- case vmIntrinsics::_dlog10:
- trans = _gvn.transform((Node*)new (C) Log10DNode(arg));
- break;
- default:
- assert(false, "bad intrinsic was passed in");
- return false;
- }
-
- // Push result back on JVM stack
- push_pair(trans);
return true;
}
//------------------------------runtime_math-----------------------------
bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
- Node* a = NULL;
- Node* b = NULL;
-
assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
"must be (DD)D or (D)D type");
// Inputs
- _sp += arg_size(); // restore stack pointer
- if (call_type == OptoRuntime::Math_DD_D_Type()) {
- b = pop_math_arg();
- }
- a = pop_math_arg();
+ Node* a = round_double_node(argument(0));
+ Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
const TypePtr* no_memory_effects = NULL;
Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
@@ -1979,43 +1818,43 @@
assert(value_top == top(), "second value must be top");
#endif
- push_pair(value);
+ set_result(value);
return true;
}
//------------------------------inline_math_native-----------------------------
bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
+#define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
switch (id) {
// These intrinsics are not properly supported on all hardware
- case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
- runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
- case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
- runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
- case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
- runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
-
- case vmIntrinsics::_dlog: return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) :
- runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
- case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) :
- runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
+ case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
+ runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos), "COS");
+ case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
+ runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin), "SIN");
+ case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
+ runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan), "TAN");
+
+ case vmIntrinsics::_dlog: return Matcher::has_match_rule(Op_LogD) ? inline_math(id) :
+ runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG");
+ case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
+ runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
// These intrinsics are supported on all hardware
- case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_sqrt(id) : false;
- case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_abs(id) : false;
-
- case vmIntrinsics::_dexp: return
- Matcher::has_match_rule(Op_ExpD) ? inline_exp(id) :
- runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
- case vmIntrinsics::_dpow: return
- Matcher::has_match_rule(Op_PowD) ? inline_pow(id) :
- runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+ case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_math(id) : false;
+ case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;
+
+ case vmIntrinsics::_dexp: return Matcher::has_match_rule(Op_ExpD) ? inline_exp() :
+ runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
+ case vmIntrinsics::_dpow: return Matcher::has_match_rule(Op_PowD) ? inline_pow() :
+ runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW");
+#undef FN_PTR
// These intrinsics are not yet correctly implemented
case vmIntrinsics::_datan2:
return false;
default:
- ShouldNotReachHere();
+ fatal_unexpected_iid(id);
return false;
}
}
@@ -2030,8 +1869,7 @@
//----------------------------inline_min_max-----------------------------------
bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
- push(generate_min_max(id, argument(0), argument(1)));
-
+ set_result(generate_min_max(id, argument(0), argument(1)));
return true;
}
@@ -2254,99 +2092,37 @@
}
}
-//-------------------inline_numberOfLeadingZeros_int/long-----------------------
-// inline int Integer.numberOfLeadingZeros(int)
-// inline int Long.numberOfLeadingZeros(long)
-bool LibraryCallKit::inline_numberOfLeadingZeros(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_numberOfLeadingZeros_i || id == vmIntrinsics::_numberOfLeadingZeros_l, "not numberOfLeadingZeros");
- if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false;
- if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false;
- _sp += arg_size(); // restore stack pointer
+//--------------------------inline_number_methods-----------------------------
+// inline int Integer.numberOfLeadingZeros(int)
+// inline int Long.numberOfLeadingZeros(long)
+//
+// inline int Integer.numberOfTrailingZeros(int)
+// inline int Long.numberOfTrailingZeros(long)
+//
+// inline int Integer.bitCount(int)
+// inline int Long.bitCount(long)
+//
+// inline char Character.reverseBytes(char)
+// inline short Short.reverseBytes(short)
+// inline int Integer.reverseBytes(int)
+// inline long Long.reverseBytes(long)
+bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
+ Node* arg = argument(0);
+ Node* n;
switch (id) {
- case vmIntrinsics::_numberOfLeadingZeros_i:
- push(_gvn.transform(new (C) CountLeadingZerosINode(pop())));
- break;
- case vmIntrinsics::_numberOfLeadingZeros_l:
- push(_gvn.transform(new (C) CountLeadingZerosLNode(pop_pair())));
- break;
- default:
- ShouldNotReachHere();
- }
- return true;
-}
-
-//-------------------inline_numberOfTrailingZeros_int/long----------------------
-// inline int Integer.numberOfTrailingZeros(int)
-// inline int Long.numberOfTrailingZeros(long)
-bool LibraryCallKit::inline_numberOfTrailingZeros(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_numberOfTrailingZeros_i || id == vmIntrinsics::_numberOfTrailingZeros_l, "not numberOfTrailingZeros");
- if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false;
- if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false;
- _sp += arg_size(); // restore stack pointer
- switch (id) {
- case vmIntrinsics::_numberOfTrailingZeros_i:
- push(_gvn.transform(new (C) CountTrailingZerosINode(pop())));
- break;
- case vmIntrinsics::_numberOfTrailingZeros_l:
- push(_gvn.transform(new (C) CountTrailingZerosLNode(pop_pair())));
- break;
- default:
- ShouldNotReachHere();
+ case vmIntrinsics::_numberOfLeadingZeros_i: n = new (C) CountLeadingZerosINode( arg); break;
+ case vmIntrinsics::_numberOfLeadingZeros_l: n = new (C) CountLeadingZerosLNode( arg); break;
+ case vmIntrinsics::_numberOfTrailingZeros_i: n = new (C) CountTrailingZerosINode(arg); break;
+ case vmIntrinsics::_numberOfTrailingZeros_l: n = new (C) CountTrailingZerosLNode(arg); break;
+ case vmIntrinsics::_bitCount_i: n = new (C) PopCountINode( arg); break;
+ case vmIntrinsics::_bitCount_l: n = new (C) PopCountLNode( arg); break;
+ case vmIntrinsics::_reverseBytes_c: n = new (C) ReverseBytesUSNode(0, arg); break;
+ case vmIntrinsics::_reverseBytes_s: n = new (C) ReverseBytesSNode( 0, arg); break;
+ case vmIntrinsics::_reverseBytes_i: n = new (C) ReverseBytesINode( 0, arg); break;
+ case vmIntrinsics::_reverseBytes_l: n = new (C) ReverseBytesLNode( 0, arg); break;
+ default: fatal_unexpected_iid(id); break;
}
- return true;
-}
-
-//----------------------------inline_bitCount_int/long-----------------------
-// inline int Integer.bitCount(int)
-// inline int Long.bitCount(long)
-bool LibraryCallKit::inline_bitCount(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_bitCount_i || id == vmIntrinsics::_bitCount_l, "not bitCount");
- if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false;
- if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false;
- _sp += arg_size(); // restore stack pointer
- switch (id) {
- case vmIntrinsics::_bitCount_i:
- push(_gvn.transform(new (C) PopCountINode(pop())));
- break;
- case vmIntrinsics::_bitCount_l:
- push(_gvn.transform(new (C) PopCountLNode(pop_pair())));
- break;
- default:
- ShouldNotReachHere();
- }
- return true;
-}
-
-//----------------------------inline_reverseBytes_int/long/char/short-------------------
-// inline Integer.reverseBytes(int)
-// inline Long.reverseBytes(long)
-// inline Character.reverseBytes(char)
-// inline Short.reverseBytes(short)
-bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
- assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l ||
- id == vmIntrinsics::_reverseBytes_c || id == vmIntrinsics::_reverseBytes_s,
- "not reverse Bytes");
- if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
- if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false;
- if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false;
- if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false;
- _sp += arg_size(); // restore stack pointer
- switch (id) {
- case vmIntrinsics::_reverseBytes_i:
- push(_gvn.transform(new (C) ReverseBytesINode(0, pop())));
- break;
- case vmIntrinsics::_reverseBytes_l:
- push_pair(_gvn.transform(new (C) ReverseBytesLNode(0, pop_pair())));
- break;
- case vmIntrinsics::_reverseBytes_c:
- push(_gvn.transform(new (C) ReverseBytesUSNode(0, pop())));
- break;
- case vmIntrinsics::_reverseBytes_s:
- push(_gvn.transform(new (C) ReverseBytesSNode(0, pop())));
- break;
- default:
- ;
- }
+ set_result(_gvn.transform(n));
return true;
}
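For reference, the Java-level bit-twiddling methods handled by inline_number_methods, with a few concrete values (plain JDK, independent of which C2 match rules the target supports):

    public class BitMethodsSketch {
        public static void main(String[] args) {
            System.out.println(Integer.numberOfLeadingZeros(1));  // 31
            System.out.println(Integer.numberOfTrailingZeros(8)); // 3
            System.out.println(Long.bitCount(0xFFL));              // 8
            System.out.println(Integer.toHexString(
                Integer.reverseBytes(0x12345678)));                // 78563412
            System.out.println(Integer.toHexString(
                Short.reverseBytes((short) 0x1234) & 0xFFFF));     // 3412
        }
    }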
@@ -2356,7 +2132,7 @@
// Helper that guards and inserts a pre-barrier.
void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
- Node* pre_val, int nargs, bool need_mem_bar) {
+ Node* pre_val, bool need_mem_bar) {
// We could be accessing the referent field of a reference object. If so, when G1
// is enabled, we need to log the value in the referent field in an SATB buffer.
// This routine performs some compile time filters and generates suitable
@@ -2406,8 +2182,8 @@
// }
// }
- float likely = PROB_LIKELY(0.999);
- float unlikely = PROB_UNLIKELY(0.999);
+ float likely = PROB_LIKELY( 0.999);
+ float unlikely = PROB_UNLIKELY(0.999);
IdealKit ideal(this);
#define __ ideal.
@@ -2419,9 +2195,7 @@
sync_kit(ideal);
Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
- _sp += nargs; // gen_instanceof might do an uncommon trap
Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
- _sp -= nargs;
// Update IdealKit memory and control from graphKit.
__ sync_kit(this);
@@ -2505,7 +2279,7 @@
{
ResourceMark rm;
// Check the signatures.
- ciSignature* sig = signature();
+ ciSignature* sig = callee()->signature();
#ifdef ASSERT
if (!is_store) {
// Object getObject(Object base, int/long offset), etc.
@@ -2543,42 +2317,19 @@
C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
- int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ];
-
- // Argument words: "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words
- int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0);
- assert(callee()->arg_size() == nargs, "must be");
-
- debug_only(int saved_sp = _sp);
- _sp += nargs;
-
- Node* val;
- debug_only(val = (Node*)(uintptr_t)-1);
-
-
- if (is_store) {
- // Get the value being stored. (Pop it first; it was pushed last.)
- switch (type) {
- case T_DOUBLE:
- case T_LONG:
- case T_ADDRESS:
- val = pop_pair();
- break;
- default:
- val = pop();
- }
- }
+ Node* receiver = argument(0); // type: oop
// Build address expression. See the code in inline_unsafe_prefetch.
- Node *adr;
- Node *heap_base_oop = top();
+ Node* adr;
+ Node* heap_base_oop = top();
Node* offset = top();
+ Node* val;
if (!is_native_ptr) {
+ // The base is either a Java object or a value produced by Unsafe.staticFieldBase
+ Node* base = argument(1); // type: oop
// The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
- offset = pop_pair();
- // The base is either a Java object or a value produced by Unsafe.staticFieldBase
- Node* base = pop();
+ offset = argument(2); // type: long
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
// to be plain byte offsets, which are also the same as those accepted
// by oopDesc::field_base.
@@ -2588,18 +2339,14 @@
offset = ConvL2X(offset);
adr = make_unsafe_address(base, offset);
heap_base_oop = base;
+ val = is_store ? argument(4) : NULL;
} else {
- Node* ptr = pop_pair();
- // Adjust Java long to machine word:
- ptr = ConvL2X(ptr);
+ Node* ptr = argument(1); // type: long
+ ptr = ConvL2X(ptr); // adjust Java long to machine word
adr = make_unsafe_address(NULL, ptr);
+ val = is_store ? argument(3) : NULL;
}
- // Pop receiver last: it was pushed first.
- Node *receiver = pop();
-
- assert(saved_sp == _sp, "must have correct argument count");
-
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
// First guess at the value type.
@@ -2633,13 +2380,7 @@
}
}
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when the primitive is inlined into a method
- // which handles NullPointerExceptions.
- _sp += nargs;
- do_null_check(receiver, T_OBJECT);
- _sp -= nargs;
+ receiver = null_check(receiver);
if (stopped()) {
return true;
}
@@ -2671,34 +2412,36 @@
if (!is_store) {
Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
- // load value and push onto stack
+ // load value
switch (type) {
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
+ case T_LONG:
case T_FLOAT:
- push(p);
+ case T_DOUBLE:
break;
case T_OBJECT:
if (need_read_barrier) {
- insert_pre_barrier(heap_base_oop, offset, p, nargs, !(is_volatile || need_mem_bar));
+ insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
}
- push(p);
break;
case T_ADDRESS:
// Cast to an int type.
- p = _gvn.transform( new (C) CastP2XNode(NULL,p) );
+ p = _gvn.transform(new (C) CastP2XNode(NULL, p));
p = ConvX2L(p);
- push_pair(p);
+ break;
+ default:
+ fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
break;
- case T_DOUBLE:
- case T_LONG:
- push_pair( p );
- break;
- default: ShouldNotReachHere();
}
+ // The load node has the control of the preceding MemBarCPUOrder. All
+ // following nodes will have the control of the MemBarCPUOrder inserted at
+ // the end of this method. So, pushing the load onto the stack at a later
+ // point is fine.
+ set_result(p);
} else {
// place effect of store into memory
switch (type) {
@@ -2762,7 +2505,7 @@
{
ResourceMark rm;
// Check the signatures.
- ciSignature* sig = signature();
+ ciSignature* sig = callee()->signature();
#ifdef ASSERT
// Object getObject(Object base, int/long offset), etc.
BasicType rtype = sig->return_type()->basic_type();
@@ -2780,19 +2523,21 @@
C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
- // Argument words: "this" if not static, plus (oop/offset) or (lo/hi) args
- int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3);
-
- debug_only(int saved_sp = _sp);
- _sp += nargs;
+ const int idx = is_static ? 0 : 1;
+ if (!is_static) {
+ null_check_receiver();
+ if (stopped()) {
+ return true;
+ }
+ }
// Build address expression. See the code in inline_unsafe_access.
Node *adr;
if (!is_native_ptr) {
+ // The base is either a Java object or a value produced by Unsafe.staticFieldBase
+ Node* base = argument(idx + 0); // type: oop
// The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
- Node* offset = pop_pair();
- // The base is either a Java object or a value produced by Unsafe.staticFieldBase
- Node* base = pop();
+ Node* offset = argument(idx + 1); // type: long
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
// to be plain byte offsets, which are also the same as those accepted
// by oopDesc::field_base.
@@ -2802,31 +2547,11 @@
offset = ConvL2X(offset);
adr = make_unsafe_address(base, offset);
} else {
- Node* ptr = pop_pair();
- // Adjust Java long to machine word:
- ptr = ConvL2X(ptr);
+ Node* ptr = argument(idx + 0); // type: long
+ ptr = ConvL2X(ptr); // adjust Java long to machine word
adr = make_unsafe_address(NULL, ptr);
}
- if (is_static) {
- assert(saved_sp == _sp, "must have correct argument count");
- } else {
- // Pop receiver last: it was pushed first.
- Node *receiver = pop();
- assert(saved_sp == _sp, "must have correct argument count");
-
- // Null check on self without removing any arguments. The argument
- // null check technically happens in the wrong place, which can lead to
- // invalid stack traces when the primitive is inlined into a method
- // which handles NullPointerExceptions.
- _sp += nargs;
- do_null_check(receiver, T_OBJECT);
- _sp -= nargs;
- if (stopped()) {
- return true;
- }
- }
-
// Generate the read or write prefetch
Node *prefetch;
if (is_store) {
@@ -2841,7 +2566,22 @@
}
//----------------------------inline_unsafe_load_store----------------------------
-
+// This method serves several different customers (depending on LoadStoreKind):
+//
+// LS_cmpxchg:
+// public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
+// public final native boolean compareAndSwapInt( Object o, long offset, int expected, int x);
+// public final native boolean compareAndSwapLong( Object o, long offset, long expected, long x);
+//
+// LS_xadd:
+// public int getAndAddInt( Object o, long offset, int delta)
+// public long getAndAddLong(Object o, long offset, long delta)
+//
+// LS_xchg:
+// int getAndSet(Object o, long offset, int newValue)
+// long getAndSet(Object o, long offset, long newValue)
+// Object getAndSet(Object o, long offset, Object newValue)
+//
bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
// This basic scheme here is the same as inline_unsafe_access, but
// differs in enough details that combining them would make the code
@@ -2856,7 +2596,8 @@
BasicType rtype;
{
ResourceMark rm;
- ciSignature* sig = signature();
+ // Check the signatures.
+ ciSignature* sig = callee()->signature();
rtype = sig->return_type()->basic_type();
if (kind == LS_xadd || kind == LS_xchg) {
// Check the signatures.
@@ -2881,28 +2622,31 @@
}
#endif //PRODUCT
- // number of stack slots per value argument (1 or 2)
- int type_words = type2size[type];
-
C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
- // Argument words: "this" plus oop plus offset (plus oldvalue) plus newvalue/delta;
- int nargs = 1 + 1 + 2 + ((kind == LS_cmpxchg) ? type_words : 0) + type_words;
-
- // pop arguments: newval, offset, base, and receiver
- debug_only(int saved_sp = _sp);
- _sp += nargs;
- Node* newval = (type_words == 1) ? pop() : pop_pair();
- Node* oldval = (kind == LS_cmpxchg) ? ((type_words == 1) ? pop() : pop_pair()) : NULL;
- Node *offset = pop_pair();
- Node *base = pop();
- Node *receiver = pop();
- assert(saved_sp == _sp, "must have correct argument count");
-
- // Null check receiver.
- _sp += nargs;
- do_null_check(receiver, T_OBJECT);
- _sp -= nargs;
+ // Get arguments:
+ Node* receiver = NULL;
+ Node* base = NULL;
+ Node* offset = NULL;
+ Node* oldval = NULL;
+ Node* newval = NULL;
+ if (kind == LS_cmpxchg) {
+ const bool two_slot_type = type2size[type] == 2;
+ receiver = argument(0); // type: oop
+ base = argument(1); // type: oop
+ offset = argument(2); // type: long
+ oldval = argument(4); // type: oop, int, or long
+ newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
+  } else if (kind == LS_xadd || kind == LS_xchg) {
+ receiver = argument(0); // type: oop
+ base = argument(1); // type: oop
+ offset = argument(2); // type: long
+ oldval = NULL;
+ newval = argument(4); // type: oop, int, or long
+ }
+
+ // Null check receiver.
+ receiver = null_check(receiver);
if (stopped()) {
return true;
}
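For context on the three LoadStoreKinds listed above: at the Java level the LS_xadd entry point is roughly a CAS retry loop over the LS_cmpxchg one, which the intrinsic collapses into a single atomic operation where the platform supports it. A hedged sketch, assuming `unsafe` is an already-obtained sun.misc.Unsafe instance:

    // Roughly what getAndAddInt means in terms of compareAndSwapInt.
    static int getAndAddIntSketch(sun.misc.Unsafe unsafe, Object o, long offset, int delta) {
        int v;
        do {
            v = unsafe.getIntVolatile(o, offset);                     // re-read current value
        } while (!unsafe.compareAndSwapInt(o, offset, v, v + delta)); // retry until the CAS wins
        return v;                                                     // previous value
    }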
@@ -3008,7 +2752,7 @@
post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
break;
default:
- ShouldNotReachHere();
+ fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
break;
}
@@ -3029,10 +2773,14 @@
#endif
assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
- push_node(load_store->bottom_type()->basic_type(), load_store);
+ set_result(load_store);
return true;
}
+//----------------------------inline_unsafe_ordered_store----------------------
+// public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
+// public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
+// public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
// This is another variant of inline_unsafe_access, differing in
// that it always issues store-store ("release") barrier and ensures
@@ -3044,7 +2792,7 @@
{
ResourceMark rm;
// Check the signatures.
- ciSignature* sig = signature();
+ ciSignature* sig = callee()->signature();
#ifdef ASSERT
BasicType rtype = sig->return_type()->basic_type();
assert(rtype == T_VOID, "must return void");
@@ -3055,27 +2803,16 @@
}
#endif //PRODUCT
- // number of stack slots per value argument (1 or 2)
- int type_words = type2size[type];
-
C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
- // Argument words: "this" plus oop plus offset plus value;
- int nargs = 1 + 1 + 2 + type_words;
-
- // pop arguments: val, offset, base, and receiver
- debug_only(int saved_sp = _sp);
- _sp += nargs;
- Node* val = (type_words == 1) ? pop() : pop_pair();
- Node *offset = pop_pair();
- Node *base = pop();
- Node *receiver = pop();
- assert(saved_sp == _sp, "must have correct argument count");
-
- // Null check receiver.
- _sp += nargs;
- do_null_check(receiver, T_OBJECT);
- _sp -= nargs;
+ // Get arguments:
+ Node* receiver = argument(0); // type: oop
+ Node* base = argument(1); // type: oop
+ Node* offset = argument(2); // type: long
+ Node* val = argument(4); // type: oop, int, or long
+
+ // Null check receiver.
+ receiver = null_check(receiver);
if (stopped()) {
return true;
}
@@ -3092,7 +2829,7 @@
insert_mem_bar(Op_MemBarRelease);
insert_mem_bar(Op_MemBarCPUOrder);
// Ensure that the store is atomic for longs:
- bool require_atomic_access = true;
+ const bool require_atomic_access = true;
Node* store;
if (type == T_OBJECT) // reference stores need a store barrier.
store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
@@ -3103,20 +2840,17 @@
return true;
}
+//----------------------------inline_unsafe_allocate---------------------------
+// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
bool LibraryCallKit::inline_unsafe_allocate() {
if (callee()->is_static()) return false; // caller must have the capability!
- int nargs = 1 + 1;
- assert(signature()->size() == nargs-1, "alloc has 1 argument");
- null_check_receiver(callee()); // check then ignore argument(0)
- _sp += nargs; // set original stack for use by uncommon_trap
- Node* cls = do_null_check(argument(1), T_OBJECT);
- _sp -= nargs;
+
+ null_check_receiver(); // null-check, then ignore
+ Node* cls = null_check(argument(1));
if (stopped()) return true;
- Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
- _sp += nargs; // set original stack for use by uncommon_trap
- kls = do_null_check(kls, T_OBJECT);
- _sp -= nargs;
+ Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
+ kls = null_check(kls);
if (stopped()) return true; // argument was like int.class
// Note: The argument might still be an illegal value like
@@ -3127,12 +2861,11 @@
// can generate code to load it as unsigned byte.
Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
Node* bits = intcon(InstanceKlass::fully_initialized);
- Node* test = _gvn.transform( new (C) SubINode(inst, bits) );
+ Node* test = _gvn.transform(new (C) SubINode(inst, bits));
// The 'test' is non-zero if we need to take a slow path.
Node* obj = new_instance(kls, test);
- push(obj);
-
+ set_result(obj);
return true;
}
@@ -3143,15 +2876,10 @@
* return myklass->trace_id & ~0x3
*/
bool LibraryCallKit::inline_native_classID() {
- int nargs = 1 + 1;
- null_check_receiver(callee()); // check then ignore argument(0)
- _sp += nargs;
- Node* cls = do_null_check(argument(1), T_OBJECT);
- _sp -= nargs;
- Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
- _sp += nargs;
- kls = do_null_check(kls, T_OBJECT);
- _sp -= nargs;
+ null_check_receiver(); // null-check, then ignore
+ Node* cls = null_check(argument(1), T_OBJECT);
+ Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
+ kls = null_check(kls, T_OBJECT);
ByteSize offset = TRACE_ID_OFFSET;
Node* insp = basic_plus_adr(kls, in_bytes(offset));
Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
@@ -3162,7 +2890,7 @@
const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
store_to_memory(control(), insp, orl, T_LONG, adr_type);
- push_pair(andl);
+ set_result(andl);
return true;
}
@@ -3177,13 +2905,12 @@
size_t thread_id_size = OSThread::thread_id_size();
if (thread_id_size == (size_t) BytesPerLong) {
threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
- push(threadid);
} else if (thread_id_size == (size_t) BytesPerInt) {
threadid = make_load(control(), p, TypeInt::INT, T_INT);
- push(threadid);
} else {
ShouldNotReachHere();
}
+ set_result(threadid);
return true;
}
#endif
@@ -3192,29 +2919,28 @@
// inline code for System.currentTimeMillis() and System.nanoTime()
// these have the same type and signature
bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
- const TypeFunc *tf = OptoRuntime::void_long_Type();
+ const TypeFunc* tf = OptoRuntime::void_long_Type();
const TypePtr* no_memory_effects = NULL;
Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0));
#ifdef ASSERT
- Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms + 1));
+ Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+1));
assert(value_top == top(), "second value must be top");
#endif
- push_pair(value);
+ set_result(value);
return true;
}
//------------------------inline_native_currentThread------------------
bool LibraryCallKit::inline_native_currentThread() {
Node* junk = NULL;
- push(generate_current_thread(junk));
+ set_result(generate_current_thread(junk));
return true;
}
//------------------------inline_native_isInterrupted------------------
+// private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
bool LibraryCallKit::inline_native_isInterrupted() {
- const int nargs = 1+1; // receiver + boolean
- assert(nargs == arg_size(), "sanity");
// Add a fast path to t.isInterrupted(clear_int):
// (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
// ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
@@ -3312,9 +3038,8 @@
set_i_o( _gvn.transform(io_phi) );
}
- push_result(result_rgn, result_val);
C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+ set_result(result_rgn, result_val);
return true;
}
@@ -3334,7 +3059,6 @@
// If the region is NULL, force never_see_null = true.
Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
bool never_see_null,
- int nargs,
RegionNode* region,
int null_path,
int offset) {
@@ -3342,7 +3066,6 @@
Node* p = basic_plus_adr(mirror, offset);
const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) );
- _sp += nargs; // any deopt will start just before call to enclosing method
Node* null_ctl = top();
kls = null_check_oop(kls, &null_ctl, never_see_null);
if (region != NULL) {
@@ -3351,7 +3074,6 @@
} else {
assert(null_ctl == top(), "no loose ends");
}
- _sp -= nargs;
return kls;
}
@@ -3376,7 +3098,6 @@
//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
- int nargs = 1+0; // just the Class mirror, in most cases
const Type* return_type = TypeInt::BOOL;
Node* prim_return_value = top(); // what happens if it's a primitive class?
bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
@@ -3384,11 +3105,14 @@
enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
+ Node* mirror = argument(0);
+ Node* obj = top();
+
switch (id) {
case vmIntrinsics::_isInstance:
- nargs = 1+1; // the Class mirror, plus the object getting queried about
// nothing is an instance of a primitive type
prim_return_value = intcon(0);
+ obj = argument(1);
break;
case vmIntrinsics::_getModifiers:
prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
@@ -3419,12 +3143,10 @@
return_type = TypeInt::INT; // not bool! 6297094
break;
default:
- ShouldNotReachHere();
+ fatal_unexpected_iid(id);
+ break;
}
- Node* mirror = argument(0);
- Node* obj = (nargs <= 1)? top(): argument(1);
-
const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
if (mirror_con == NULL) return false; // cannot happen?
@@ -3451,9 +3173,7 @@
// For Reflection.getClassAccessFlags(), the null check occurs in
// the wrong place; see inline_unsafe_access(), above, for a similar
// situation.
- _sp += nargs; // set original stack for use by uncommon_trap
- mirror = do_null_check(mirror, T_OBJECT);
- _sp -= nargs;
+ mirror = null_check(mirror);
// If mirror or obj is dead, only null-path is taken.
if (stopped()) return true;
@@ -3461,11 +3181,10 @@
// Now load the mirror's klass metaobject, and null-check it.
// Side-effects region with the control path if the klass is null.
- Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs,
- region, _prim_path);
+ Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
// If kls is null, we have a primitive mirror.
phi->init_req(_prim_path, prim_return_value);
- if (stopped()) { push_result(region, phi); return true; }
+ if (stopped()) { set_result(region, phi); return true; }
Node* p; // handy temp
Node* null_ctl;
@@ -3476,9 +3195,7 @@
switch (id) {
case vmIntrinsics::_isInstance:
// nothing is an instance of a primitive type
- _sp += nargs; // gen_instanceof might do an uncommon trap
query_value = gen_instanceof(obj, kls);
- _sp -= nargs;
break;
case vmIntrinsics::_getModifiers:
@@ -3553,16 +3270,16 @@
break;
default:
- ShouldNotReachHere();
+ fatal_unexpected_iid(id);
+ break;
}
// Fall-through is the normal case of a query to a real class.
phi->init_req(1, query_value);
region->init_req(1, control());
- push_result(region, phi);
C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+ set_result(region, phi);
return true;
}
@@ -3570,8 +3287,6 @@
// This intrinsic takes the JNI calls out of the heart of
// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
bool LibraryCallKit::inline_native_subtype_check() {
- int nargs = 1+1; // the Class mirror, plus the other class getting examined
-
// Pull both arguments off the stack.
Node* args[2]; // two java.lang.Class mirrors: superc, subc
args[0] = argument(0);
@@ -3602,9 +3317,7 @@
int which_arg;
for (which_arg = 0; which_arg <= 1; which_arg++) {
Node* arg = args[which_arg];
- _sp += nargs; // set original stack for use by uncommon_trap
- arg = do_null_check(arg, T_OBJECT);
- _sp -= nargs;
+ arg = null_check(arg);
if (stopped()) break;
args[which_arg] = _gvn.transform(arg);
@@ -3618,9 +3331,7 @@
for (which_arg = 0; which_arg <= 1; which_arg++) {
Node* kls = klasses[which_arg];
Node* null_ctl = top();
- _sp += nargs; // set original stack for use by uncommon_trap
kls = null_check_oop(kls, &null_ctl, never_see_null);
- _sp -= nargs;
int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
region->init_req(prim_path, null_ctl);
if (stopped()) break;
@@ -3670,8 +3381,7 @@
}
set_control(_gvn.transform(region));
- push(_gvn.transform(phi));
-
+ set_result(_gvn.transform(phi));
return true;
}
@@ -3719,14 +3429,12 @@
//-----------------------inline_native_newArray--------------------------
+// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
bool LibraryCallKit::inline_native_newArray() {
- int nargs = 2;
Node* mirror = argument(0);
Node* count_val = argument(1);
- _sp += nargs; // set original stack for use by uncommon_trap
- mirror = do_null_check(mirror, T_OBJECT);
- _sp -= nargs;
+ mirror = null_check(mirror);
// If mirror or obj is dead, only null-path is taken.
if (stopped()) return true;
@@ -3740,7 +3448,6 @@
bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
- nargs,
result_reg, _slow_path);
Node* normal_ctl = control();
Node* no_array_ctl = result_reg->in(_slow_path);
@@ -3767,7 +3474,7 @@
// Normal case: The array type has been cached in the java.lang.Class.
// The following call works fine even if the array type is polymorphic.
// It could be a dynamic mix of int[], boolean[], Object[], etc.
- Node* obj = new_array(klass_node, count_val, nargs);
+ Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
result_reg->init_req(_normal_path, control());
result_val->init_req(_normal_path, obj);
result_io ->init_req(_normal_path, i_o());
@@ -3777,23 +3484,18 @@
// Return the combined state.
set_i_o( _gvn.transform(result_io) );
set_all_memory( _gvn.transform(result_mem) );
- push_result(result_reg, result_val);
+
C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+ set_result(result_reg, result_val);
return true;
}
//----------------------inline_native_getLength--------------------------
+// public static native int java.lang.reflect.Array.getLength(Object array);
bool LibraryCallKit::inline_native_getLength() {
if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
- int nargs = 1;
- Node* array = argument(0);
-
- _sp += nargs; // set original stack for use by uncommon_trap
- array = do_null_check(array, T_OBJECT);
- _sp -= nargs;
-
+ Node* array = null_check(argument(0));
// If array is dead, only null-path is taken.
if (stopped()) return true;
@@ -3803,7 +3505,6 @@
if (non_array != NULL) {
PreserveJVMState pjvms(this);
set_control(non_array);
- _sp += nargs; // push the arguments back on the stack
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
@@ -3813,19 +3514,21 @@
// This works fine even if the array type is polymorphic.
// It could be a dynamic mix of int[], boolean[], Object[], etc.
- push( load_array_length(array) );
-
- C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+ Node* result = load_array_length(array);
+
+ C->set_has_split_ifs(true); // Has chance for split-if optimization
+ set_result(result);
return true;
}
//------------------------inline_array_copyOf----------------------------
+// public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
+// public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
- // Restore the stack and pop off the arguments.
- int nargs = 3 + (is_copyOfRange? 1: 0);
+ // Get the arguments.
Node* original = argument(0);
Node* start = is_copyOfRange? argument(1): intcon(0);
Node* end = is_copyOfRange? argument(2): argument(1);
@@ -3833,23 +3536,21 @@
Node* newcopy;
- //set the original stack and the reexecute bit for the interpreter to reexecute
- //the bytecode that invokes Arrays.copyOf if deoptimization happens
+ // Set the original stack and the reexecute bit for the interpreter to reexecute
+ // the bytecode that invokes Arrays.copyOf if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
- _sp += nargs;
jvms()->set_should_reexecute(true);
- array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
- original = do_null_check(original, T_OBJECT);
+ array_type_mirror = null_check(array_type_mirror);
+ original = null_check(original);
// Check if a null path was taken unconditionally.
if (stopped()) return true;
Node* orig_length = load_array_length(original);
- Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0,
- NULL, 0);
- klass_node = do_null_check(klass_node, T_OBJECT);
+ Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
+ klass_node = null_check(klass_node);
RegionNode* bailout = new (C) RegionNode(1);
record_for_igvn(bailout);
@@ -3872,7 +3573,7 @@
Node* length = end;
if (_gvn.type(start) != TypeInt::ZERO) {
- length = _gvn.transform( new (C) SubINode(end, start) );
+ length = _gvn.transform(new (C) SubINode(end, start));
}
// Bail out if length is negative.
@@ -3883,19 +3584,18 @@
if (bailout->req() > 1) {
PreserveJVMState pjvms(this);
- set_control( _gvn.transform(bailout) );
+ set_control(_gvn.transform(bailout));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
if (!stopped()) {
-
// How many elements will we copy from the original?
// The answer is MinI(orig_length - start, length).
- Node* orig_tail = _gvn.transform( new(C) SubINode(orig_length, start) );
+ Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
- newcopy = new_array(klass_node, length, 0);
+ newcopy = new_array(klass_node, length, 0); // no arguments to push
// Generate a direct call to the right arraycopy function(s).
// We know the copy is disjoint but we might not know if the
@@ -3910,14 +3610,12 @@
original, start, newcopy, intcon(0), moved,
disjoint_bases, length_never_negative);
}
- } //original reexecute and sp are set back here
-
- if(!stopped()) {
- push(newcopy);
- }
+ } // original reexecute is set back here
C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+ if (!stopped()) {
+ set_result(newcopy);
+ }
return true;
}
@@ -3969,7 +3667,7 @@
SharedRuntime::get_resolve_static_call_stub(),
method, bci());
} else if (is_virtual) {
- null_check_receiver(method);
+ null_check_receiver();
int vtable_index = Method::invalid_vtable_index;
if (UseInlineCaches) {
// Suppress the vtable call
@@ -3983,7 +3681,7 @@
SharedRuntime::get_resolve_virtual_call_stub(),
method, vtable_index, bci());
} else { // neither virtual nor static: opt_virtual
- null_check_receiver(method);
+ null_check_receiver();
slow_call = new(C) CallStaticJavaNode(tf,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
method, bci());
@@ -4012,7 +3710,7 @@
Node* obj = NULL;
if (!is_static) {
// Check for hashing null object
- obj = null_check_receiver(callee());
+ obj = null_check_receiver();
if (stopped()) return true; // unconditionally null
result_reg->init_req(_null_path, top());
result_val->init_req(_null_path, top());
@@ -4028,9 +3726,9 @@
// Unconditionally null? Then return right away.
if (stopped()) {
- set_control( result_reg->in(_null_path) );
+ set_control( result_reg->in(_null_path));
if (!stopped())
- push( result_val ->in(_null_path) );
+ set_result(result_val->in(_null_path));
return true;
}
@@ -4103,8 +3801,7 @@
if (!stopped()) {
// No need for PreserveJVMState, because we're using up the present state.
set_all_memory(init_mem);
- vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode;
- if (is_static) hashCode_id = vmIntrinsics::_identityHashCode;
+ vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
Node* slow_result = set_results_for_java_call(slow_call);
// this->control() comes from set_results_for_java_call
@@ -4117,48 +3814,38 @@
// Return the combined state.
set_i_o( _gvn.transform(result_io) );
set_all_memory( _gvn.transform(result_mem) );
- push_result(result_reg, result_val);
-
+
+ set_result(result_reg, result_val);
return true;
}
//---------------------------inline_native_getClass----------------------------
+// public final native Class<?> java.lang.Object.getClass();
+//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
- Node* obj = null_check_receiver(callee());
+ Node* obj = null_check_receiver();
if (stopped()) return true;
- push( load_mirror_from_klass(load_object_klass(obj)) );
+ set_result(load_mirror_from_klass(load_object_klass(obj)));
return true;
}
//-----------------inline_native_Reflection_getCallerClass---------------------
+// public static native Class<?> sun.reflect.Reflection.getCallerClass(int realFramesToSkip);
+//
// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
//
// NOTE that this code must perform the same logic as
// vframeStream::security_get_caller_frame in that it must skip
// Method.invoke() and auxiliary frames.
-
-
-
-
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
- ciMethod* method = callee();
-
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
}
#endif
- debug_only(int saved_sp = _sp);
-
- // Argument words: (int depth)
- int nargs = 1;
-
- _sp += nargs;
- Node* caller_depth_node = pop();
-
- assert(saved_sp == _sp, "must have correct argument count");
+ Node* caller_depth_node = argument(0);
// The depth value must be a constant in order for the runtime call
// to be eliminated.
@@ -4230,7 +3917,8 @@
tty->print_cr(" Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
tty->print_cr(" JVM state at this point:");
for (int i = _depth; i >= 1; i--) {
- tty->print_cr(" %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
+ ciMethod* m = jvms()->of_depth(i)->method();
+ tty->print_cr(" %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
}
}
#endif
@@ -4240,14 +3928,17 @@
// Acquire method holder as java.lang.Class
ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
ciInstance* caller_mirror = caller_klass->java_mirror();
+
// Push this as a constant
- push(makecon(TypeInstPtr::make(caller_mirror)));
+ set_result(makecon(TypeInstPtr::make(caller_mirror)));
+
#ifndef PRODUCT
if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
tty->print_cr(" Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
tty->print_cr(" JVM state at this point:");
for (int i = _depth; i >= 1; i--) {
- tty->print_cr(" %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
+ ciMethod* m = jvms()->of_depth(i)->method();
+ tty->print_cr(" %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
}
}
#endif
@@ -4283,36 +3974,23 @@
}
bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
- // restore the arguments
- _sp += arg_size();
+ Node* arg = argument(0);
+ Node* result;
switch (id) {
- case vmIntrinsics::_floatToRawIntBits:
- push(_gvn.transform( new (C) MoveF2INode(pop())));
- break;
-
- case vmIntrinsics::_intBitsToFloat:
- push(_gvn.transform( new (C) MoveI2FNode(pop())));
- break;
-
- case vmIntrinsics::_doubleToRawLongBits:
- push_pair(_gvn.transform( new (C) MoveD2LNode(pop_pair())));
- break;
-
- case vmIntrinsics::_longBitsToDouble:
- push_pair(_gvn.transform( new (C) MoveL2DNode(pop_pair())));
- break;
+ case vmIntrinsics::_floatToRawIntBits: result = new (C) MoveF2INode(arg); break;
+ case vmIntrinsics::_intBitsToFloat: result = new (C) MoveI2FNode(arg); break;
+ case vmIntrinsics::_doubleToRawLongBits: result = new (C) MoveD2LNode(arg); break;
+ case vmIntrinsics::_longBitsToDouble: result = new (C) MoveL2DNode(arg); break;
case vmIntrinsics::_doubleToLongBits: {
- Node* value = pop_pair();
-
// two paths (plus control) merge in a wood
RegionNode *r = new (C) RegionNode(3);
Node *phi = new (C) PhiNode(r, TypeLong::LONG);
- Node *cmpisnan = _gvn.transform( new (C) CmpDNode(value, value));
+ Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
// Build the boolean node
- Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) );
+ Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
// Branch either way.
// NaN case is less traveled, which makes all the difference.
@@ -4330,35 +4008,30 @@
r->init_req(1, iftrue);
// Else fall through
- Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) );
+ Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
set_control(iffalse);
- phi->init_req(2, _gvn.transform( new (C) MoveD2LNode(value)));
+ phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg)));
r->init_req(2, iffalse);
// Post merge
set_control(_gvn.transform(r));
record_for_igvn(r);
- Node* result = _gvn.transform(phi);
+ C->set_has_split_ifs(true); // Has chance for split-if optimization
+ result = phi;
assert(result->bottom_type()->isa_long(), "must be");
- push_pair(result);
-
- C->set_has_split_ifs(true); // Has chance for split-if optimization
-
break;
}
case vmIntrinsics::_floatToIntBits: {
- Node* value = pop();
-
// two paths (plus control) merge in a wood
RegionNode *r = new (C) RegionNode(3);
Node *phi = new (C) PhiNode(r, TypeInt::INT);
- Node *cmpisnan = _gvn.transform( new (C) CmpFNode(value, value));
+ Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg));
// Build the boolean node
- Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) );
+ Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
// Branch either way.
// NaN case is less traveled, which makes all the difference.
@@ -4376,29 +4049,27 @@
r->init_req(1, iftrue);
// Else fall through
- Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) );
+ Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
set_control(iffalse);
- phi->init_req(2, _gvn.transform( new (C) MoveF2INode(value)));
+ phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg)));
r->init_req(2, iffalse);
// Post merge
set_control(_gvn.transform(r));
record_for_igvn(r);
- Node* result = _gvn.transform(phi);
+ C->set_has_split_ifs(true); // Has chance for split-if optimization
+ result = phi;
assert(result->bottom_type()->isa_int(), "must be");
- push(result);
-
- C->set_has_split_ifs(true); // Has chance for split-if optimization
-
break;
}
default:
- ShouldNotReachHere();
+ fatal_unexpected_iid(id);
+ break;
}
-
+ set_result(_gvn.transform(result));
return true;
}
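
The region/phi built for _doubleToLongBits above implements the Java rule that Double.doubleToLongBits collapses every NaN to one canonical bit pattern, while the raw variant keeps the payload. A standalone sketch of that behaviour (ordinary C++, not Ideal-graph code; the canonical constant is the one required by the Java API):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Raw variant: reinterpret the bits unchanged (what the MoveD2L path does).
    static uint64_t double_to_raw_long_bits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits;
    }

    // Collapsing variant: the NaN branch returns the single canonical
    // quiet-NaN pattern required by Double.doubleToLongBits.
    static uint64_t double_to_long_bits(double d) {
      if (std::isnan(d)) return UINT64_C(0x7ff8000000000000);
      return double_to_raw_long_bits(d);
    }

    int main() {
      // Build a quiet NaN with a non-canonical payload.
      uint64_t payload = UINT64_C(0x7ff8dead00000001);
      double d;
      std::memcpy(&d, &payload, sizeof d);
      std::printf("raw:       %016llx\n", (unsigned long long) double_to_raw_long_bits(d));
      std::printf("canonical: %016llx\n", (unsigned long long) double_to_long_bits(d));
      return 0;
    }
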
@@ -4409,23 +4080,19 @@
#endif //_LP64
//----------------------inline_unsafe_copyMemory-------------------------
+// public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
bool LibraryCallKit::inline_unsafe_copyMemory() {
if (callee()->is_static()) return false; // caller must have the capability!
- int nargs = 1 + 5 + 3; // 5 args: (src: ptr,off, dst: ptr,off, size)
- assert(signature()->size() == nargs-1, "copy has 5 arguments");
- null_check_receiver(callee()); // check then ignore argument(0)
+ null_check_receiver(); // null-check receiver
if (stopped()) return true;
C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
- Node* src_ptr = argument(1);
- Node* src_off = ConvL2X(argument(2));
- assert(argument(3)->is_top(), "2nd half of long");
- Node* dst_ptr = argument(4);
- Node* dst_off = ConvL2X(argument(5));
- assert(argument(6)->is_top(), "2nd half of long");
- Node* size = ConvL2X(argument(7));
- assert(argument(8)->is_top(), "2nd half of long");
+ Node* src_ptr = argument(1); // type: oop
+ Node* src_off = ConvL2X(argument(2)); // type: long
+ Node* dst_ptr = argument(4); // type: oop
+ Node* dst_off = ConvL2X(argument(5)); // type: long
+ Node* size = ConvL2X(argument(7)); // type: long
assert(Unsafe_field_offset_to_byte_offset(11) == 11,
"fieldOffset must be byte-scaled");
@@ -4545,6 +4212,8 @@
}
//------------------------inline_native_clone----------------------------
+// protected native Object java.lang.Object.clone();
+//
// Here are the simple edge cases:
// null receiver => normal trap
// virtual and clone was overridden => slow path to out-of-line clone
@@ -4561,20 +4230,16 @@
// can be sharply typed as an object array, a type array, or an instance.
//
bool LibraryCallKit::inline_native_clone(bool is_virtual) {
- int nargs = 1;
PhiNode* result_val;
- //set the original stack and the reexecute bit for the interpreter to reexecute
- //the bytecode that invokes Object.clone if deoptimization happens
+ // Set the reexecute bit for the interpreter to reexecute
+ // the bytecode that invokes Object.clone if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
- //null_check_receiver will adjust _sp (push and pop)
- Node* obj = null_check_receiver(callee());
+ Node* obj = null_check_receiver();
if (stopped()) return true;
- _sp += nargs;
-
Node* obj_klass = load_object_klass(obj);
const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
const TypeOopPtr* toop = ((tklass != NULL)
@@ -4611,7 +4276,7 @@
set_control(array_ctl);
Node* obj_length = load_array_length(obj);
Node* obj_size = NULL;
- Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
+ Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
if (!use_ReduceInitialCardMarks()) {
// If it is an oop array, it requires very special treatment,
@@ -4711,10 +4376,9 @@
set_control( _gvn.transform(result_reg) );
set_i_o( _gvn.transform(result_i_o) );
set_all_memory( _gvn.transform(result_mem) );
- } //original reexecute and sp are set back here
-
- push(_gvn.transform(result_val));
-
+ } // original reexecute is set back here
+
+ set_result(_gvn.transform(result_val));
return true;
}
@@ -4755,25 +4419,25 @@
//------------------------------inline_arraycopy-----------------------
+// public static native void java.lang.System.arraycopy(Object src, int srcPos,
+// Object dest, int destPos,
+// int length);
bool LibraryCallKit::inline_arraycopy() {
- // Restore the stack and pop off the arguments.
- int nargs = 5; // 2 oops, 3 ints, no size_t or long
- assert(callee()->signature()->size() == nargs, "copy has 5 arguments");
-
- Node *src = argument(0);
- Node *src_offset = argument(1);
- Node *dest = argument(2);
- Node *dest_offset = argument(3);
- Node *length = argument(4);
+ // Get the arguments.
+ Node* src = argument(0); // type: oop
+ Node* src_offset = argument(1); // type: int
+ Node* dest = argument(2); // type: oop
+ Node* dest_offset = argument(3); // type: int
+ Node* length = argument(4); // type: int
// Compile time checks. If any of these checks cannot be verified at compile time,
// we do not make a fast path for this call. Instead, we let the call remain as it
// is. The checks we choose to mandate at compile time are:
//
// (1) src and dest are arrays.
- const Type* src_type = src->Value(&_gvn);
+ const Type* src_type = src->Value(&_gvn);
const Type* dest_type = dest->Value(&_gvn);
- const TypeAryPtr* top_src = src_type->isa_aryptr();
+ const TypeAryPtr* top_src = src_type->isa_aryptr();
const TypeAryPtr* top_dest = dest_type->isa_aryptr();
if (top_src == NULL || top_src->klass() == NULL ||
top_dest == NULL || top_dest->klass() == NULL) {
@@ -4828,15 +4492,13 @@
record_for_igvn(slow_region);
// (3) operands must not be null
- // We currently perform our null checks with the do_null_check routine.
+ // We currently perform our null checks with the null_check routine.
// This means that the null exceptions will be reported in the caller
// rather than (correctly) reported inside of the native arraycopy call.
// This should be corrected, given time. We do our null check with the
// stack pointer restored.
- _sp += nargs;
- src = do_null_check(src, T_ARRAY);
- dest = do_null_check(dest, T_ARRAY);
- _sp -= nargs;
+ src = null_check(src, T_ARRAY);
+ dest = null_check(dest, T_ARRAY);
// (4) src_offset must not be negative.
generate_negative_guard(src_offset, slow_region);
@@ -5179,7 +4841,7 @@
slow_control = top();
if (slow_region != NULL)
slow_control = _gvn.transform(slow_region);
- debug_only(slow_region = (RegionNode*)badAddress);
+ DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
set_control(checked_control);
if (!stopped()) {
@@ -5674,33 +5336,22 @@
}
//----------------------------inline_reference_get----------------------------
-
+// public T java.lang.ref.Reference.get();
bool LibraryCallKit::inline_reference_get() {
- const int nargs = 1; // self
-
- guarantee(java_lang_ref_Reference::referent_offset > 0,
- "should have already been set");
-
- int referent_offset = java_lang_ref_Reference::referent_offset;
-
- // Restore the stack and pop off the argument
- _sp += nargs;
- Node *reference_obj = pop();
-
- // Null check on self without removing any arguments.
- _sp += nargs;
- reference_obj = do_null_check(reference_obj, T_OBJECT);
- _sp -= nargs;;
-
+ const int referent_offset = java_lang_ref_Reference::referent_offset;
+ guarantee(referent_offset > 0, "should have already been set");
+
+ // Get the argument:
+ Node* reference_obj = null_check_receiver();
if (stopped()) return true;
- Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
+ Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
ciInstanceKlass* klass = env()->Object_klass();
const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
Node* no_ctrl = NULL;
- Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT);
+ Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT);
// Use the pre-barrier to record the value in the referent field
pre_barrier(false /* do_load */,
@@ -5713,7 +5364,7 @@
// across safepoint since GC can change its value.
insert_mem_bar(Op_MemBarCPUOrder);
- push(result);
+ set_result(result);
return true;
}
@@ -5770,15 +5421,11 @@
}
if (stubAddr == NULL) return false;
- // Restore the stack and pop off the arguments.
- int nargs = 5; // this + 2 oop/offset combos
- assert(callee()->signature()->size() == nargs-1, "encryptBlock has 4 arguments");
-
- Node *aescrypt_object = argument(0);
- Node *src = argument(1);
- Node *src_offset = argument(2);
- Node *dest = argument(3);
- Node *dest_offset = argument(4);
+ Node* aescrypt_object = argument(0);
+ Node* src = argument(1);
+ Node* src_offset = argument(2);
+ Node* dest = argument(3);
+ Node* dest_offset = argument(4);
// (1) src and dest are arrays.
const Type* src_type = src->Value(&_gvn);
@@ -5829,16 +5476,12 @@
}
if (stubAddr == NULL) return false;
-
- // Restore the stack and pop off the arguments.
- int nargs = 6; // this + oop/offset + len + oop/offset
- assert(callee()->signature()->size() == nargs-1, "wrong number of arguments");
- Node *cipherBlockChaining_object = argument(0);
- Node *src = argument(1);
- Node *src_offset = argument(2);
- Node *len = argument(3);
- Node *dest = argument(4);
- Node *dest_offset = argument(5);
+ Node* cipherBlockChaining_object = argument(0);
+ Node* src = argument(1);
+ Node* src_offset = argument(2);
+ Node* len = argument(3);
+ Node* dest = argument(4);
+ Node* dest_offset = argument(5);
// (1) src and dest are arrays.
const Type* src_type = src->Value(&_gvn);
@@ -5920,11 +5563,8 @@
//
Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
// First, check receiver for NULL since it is virtual method.
- int nargs = arg_size();
Node* objCBC = argument(0);
- _sp += nargs;
- objCBC = do_null_check(objCBC, T_OBJECT);
- _sp -= nargs;
+ objCBC = null_check(objCBC);
if (stopped()) return NULL; // Always NULL
@@ -5948,9 +5588,7 @@
}
ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
- _sp += nargs; // gen_instanceof might do an uncommon trap
Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
- _sp -= nargs;
Node* cmp_instof = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
@@ -5966,7 +5604,7 @@
RegionNode* region = new(C) RegionNode(3);
region->init_req(1, instof_false);
Node* src = argument(1);
- Node *dest = argument(4);
+ Node* dest = argument(4);
Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
@@ -5974,7 +5612,4 @@
record_for_igvn(region);
return _gvn.transform(region);
-
}
-
-
--- a/hotspot/src/share/vm/opto/locknode.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/locknode.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -165,7 +165,7 @@
kill_dead_locals();
// Null check; get casted pointer.
- Node *obj = do_null_check(peek(), T_OBJECT);
+ Node* obj = null_check(peek());
// Check for locking null object
if (stopped()) return;
--- a/hotspot/src/share/vm/opto/loopTransform.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -269,10 +269,10 @@
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
Node *test = ((IdealLoopTree*)this)->tail();
int body_size = ((IdealLoopTree*)this)->_body.size();
- int uniq = phase->C->unique();
+ int live_node_count = phase->C->live_nodes();
// Peeling does loop cloning which can result in O(N^2) node construction
if( body_size > 255 /* Prevent overflow for large body_size */
- || (body_size * body_size + uniq > MaxNodeLimit) ) {
+ || (body_size * body_size + live_node_count > MaxNodeLimit) ) {
return false; // too large to safely clone
}
while( test != _head ) { // Scan till run off top of loop
@@ -601,7 +601,7 @@
return false;
if (new_body_size > unroll_limit ||
// Unrolling can result in a large amount of node construction
- new_body_size >= MaxNodeLimit - phase->C->unique()) {
+ new_body_size >= MaxNodeLimit - (uint) phase->C->live_nodes()) {
return false;
}
@@ -2268,7 +2268,7 @@
// Skip next optimizations if running low on nodes. Note that
// policy_unswitching and policy_maximally_unroll have this check.
- uint nodes_left = MaxNodeLimit - phase->C->unique();
+ uint nodes_left = MaxNodeLimit - (uint) phase->C->live_nodes();
if ((2 * _body.size()) > nodes_left) {
return true;
}
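
The loopTransform hunks above (and the loopUnswitch/loopopts ones that follow) replace C->unique() with C->live_nodes() in the node-budget checks: unique() is a monotonically increasing id counter that also counts nodes that have since died, so budgets based on it become needlessly pessimistic late in a compilation. A toy illustration of the difference (hypothetical names and an illustrative limit, not the Compile class):

    #include <cstdio>

    // Hypothetical bookkeeping: 'unique' only grows over a compilation, while
    // 'live = unique - dead' reflects the graph that actually remains, which is
    // the quantity the budget checks above now use.
    struct NodeCounts {
      unsigned unique = 0;  // ids ever handed out
      unsigned dead   = 0;  // ids recorded as dead
      unsigned live_nodes() const { return unique - dead; }
    };

    int main() {
      const unsigned MaxNodeLimit = 80000;  // illustrative limit only
      NodeCounts c;
      c.unique = 75000;                     // many ids allocated so far...
      c.dead   = 40000;                     // ...but most of those nodes are gone
      const unsigned body_size = 200;       // loop body about to be peeled

      bool ok_by_unique = body_size * body_size + c.unique       <= MaxNodeLimit;
      bool ok_by_live   = body_size * body_size + c.live_nodes() <= MaxNodeLimit;
      std::printf("peel allowed by unique(): %s, by live_nodes(): %s\n",
                  ok_by_unique ? "yes" : "no", ok_by_live ? "yes" : "no");
      return 0;
    }
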
--- a/hotspot/src/share/vm/opto/loopUnswitch.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/loopUnswitch.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -59,7 +59,7 @@
if (!_head->is_Loop()) {
return false;
}
- uint nodes_left = MaxNodeLimit - phase->C->unique();
+ uint nodes_left = MaxNodeLimit - phase->C->live_nodes();
if (2 * _body.size() > nodes_left) {
return false; // Too speculative if running low on nodes.
}
--- a/hotspot/src/share/vm/opto/loopopts.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/loopopts.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -729,7 +729,7 @@
for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
weight += region->fast_out(i)->outcnt();
}
- int nodes_left = MaxNodeLimit - C->unique();
+ int nodes_left = MaxNodeLimit - C->live_nodes();
if (weight * 8 > nodes_left) {
#ifndef PRODUCT
if (PrintOpto)
--- a/hotspot/src/share/vm/opto/macro.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/macro.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -2262,7 +2262,7 @@
Node *slow_ctrl = _fallthroughproj->clone();
transform_later(slow_ctrl);
_igvn.hash_delete(_fallthroughproj);
- _fallthroughproj->disconnect_inputs(NULL);
+ _fallthroughproj->disconnect_inputs(NULL, C);
region->init_req(1, slow_ctrl);
// region inputs are now complete
transform_later(region);
@@ -2327,7 +2327,7 @@
Node *slow_ctrl = _fallthroughproj->clone();
transform_later(slow_ctrl);
_igvn.hash_delete(_fallthroughproj);
- _fallthroughproj->disconnect_inputs(NULL);
+ _fallthroughproj->disconnect_inputs(NULL, C);
region->init_req(1, slow_ctrl);
// region inputs are now complete
transform_later(region);
--- a/hotspot/src/share/vm/opto/matcher.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/matcher.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -342,6 +342,7 @@
// Reset node counter so MachNodes start with _idx at 0
int nodes = C->unique(); // save value
C->set_unique(0);
+ C->reset_dead_node_list();
// Recursively match trees from old space into new space.
// Correct leaves of new-space Nodes; they point to old-space.
--- a/hotspot/src/share/vm/opto/node.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/node.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -57,7 +57,7 @@
int new_debug_idx = old_debug_idx+1;
if (new_debug_idx > 0) {
// Arrange that the lowest five decimal digits of _debug_idx
- // will repeat thos of _idx. In case this is somehow pathological,
+ // will repeat those of _idx. In case this is somehow pathological,
// we continue to assign negative numbers (!) consecutively.
const int mod = 100000;
int bump = (int)(_idx - new_debug_idx) % mod;
@@ -67,7 +67,7 @@
}
Compile::set_debug_idx(new_debug_idx);
set_debug_idx( new_debug_idx );
- assert(Compile::current()->unique() < (uint)MaxNodeLimit, "Node limit exceeded");
+ assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX");
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
BREAKPOINT;
@@ -802,7 +802,7 @@
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
-int Node::disconnect_inputs(Node *n) {
+int Node::disconnect_inputs(Node *n, Compile* C) {
int edges_to_n = 0;
uint cnt = req();
@@ -824,6 +824,9 @@
// Node::destruct requires all out edges be deleted first
// debug_only(destruct();) // no reuse benefit expected
+ if (edges_to_n == 0) {
+ C->record_dead_node(_idx);
+ }
return edges_to_n;
}
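
The disconnect_inputs(Node*, Compile*) change above is what feeds that live-node count: a node disconnected this way gets reported via record_dead_node() so the dead-node list consumed by live_nodes() stays accurate. A minimal sketch of that bookkeeping pattern (invented names, not the HotSpot classes):

    #include <cstdio>
    #include <vector>

    // Invented sketch: track allocated ids and the subset recorded as dead,
    // mirroring record_dead_node() / live_nodes() at a very small scale.
    struct DeadNodeTracker {
      unsigned next_idx = 0;
      std::vector<bool> dead;

      unsigned register_node()         { dead.push_back(false); return next_idx++; }
      void     record_dead(unsigned i) { dead[i] = true; }
      unsigned unique() const          { return next_idx; }
      unsigned live_nodes() const {
        unsigned d = 0;
        for (bool b : dead) d += b ? 1 : 0;
        return next_idx - d;
      }
    };

    int main() {
      DeadNodeTracker C;
      unsigned kept = C.register_node();
      unsigned cut  = C.register_node();
      (void) kept;
      // Analogue of old->disconnect_inputs(NULL, C): once the node is fully
      // disconnected, report it so the live count stays accurate.
      C.record_dead(cut);
      std::printf("unique=%u live=%u\n", C.unique(), C.live_nodes());
      return 0;
    }
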
--- a/hotspot/src/share/vm/opto/node.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/node.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -410,7 +410,7 @@
int replace_edge(Node* old, Node* neww);
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
- int disconnect_inputs(Node *n);
+ int disconnect_inputs(Node *n, Compile *c);
// Quickly, return true if and only if I am Compile::current()->top().
bool is_top() const {
@@ -458,9 +458,9 @@
void replace_by(Node* new_node);
// Globally replace this node by a given new node, updating all uses
// and cutting input edges of old node.
- void subsume_by(Node* new_node) {
+ void subsume_by(Node* new_node, Compile* c) {
replace_by(new_node);
- disconnect_inputs(NULL);
+ disconnect_inputs(NULL, c);
}
void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
// Find the one non-null required input. RegionNode only
--- a/hotspot/src/share/vm/opto/output.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/output.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -513,7 +513,7 @@
}
adjust_block_start += diff;
b->_nodes.map(idx, replacement);
- mach->subsume_by(replacement);
+ mach->subsume_by(replacement, C);
mach = replacement;
progress = true;
@@ -1425,7 +1425,7 @@
jmp_rule[i] = mach->rule();
#endif
b->_nodes.map(j, replacement);
- mach->subsume_by(replacement);
+ mach->subsume_by(replacement, C);
n = replacement;
mach = replacement;
}
--- a/hotspot/src/share/vm/opto/parse1.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/parse1.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -601,8 +601,8 @@
set_map(entry_map);
do_exits();
- if (log) log->done("parse nodes='%d' memory='%d'",
- C->unique(), C->node_arena()->used());
+ if (log) log->done("parse nodes='%d' live='%d' memory='%d'",
+ C->unique(), C->live_nodes(), C->node_arena()->used());
}
//---------------------------do_all_blocks-------------------------------------
@@ -1008,7 +1008,7 @@
// If this is an inlined method, we may have to do a receiver null check.
if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
GraphKit kit(_caller);
- kit.null_check_receiver(method());
+ kit.null_check_receiver_before_call(method());
_caller = kit.transfer_exceptions_into_jvms();
if (kit.stopped()) {
_exits.add_exception_states_from(_caller);
@@ -1398,7 +1398,7 @@
#ifdef ASSERT
int pre_bc_sp = sp();
int inputs, depth;
- bool have_se = !stopped() && compute_stack_effects(inputs, depth, /*for_parse*/ true);
+ bool have_se = !stopped() && compute_stack_effects(inputs, depth);
assert(!have_se || pre_bc_sp >= inputs, err_msg_res("have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs));
#endif //ASSERT
--- a/hotspot/src/share/vm/opto/parse2.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/parse2.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -48,7 +48,7 @@
const Type* elem = Type::TOP;
Node* adr = array_addressing(elem_type, 0, &elem);
if (stopped()) return; // guaranteed null or range check
- _sp -= 2; // Pop array and index
+ dec_sp(2); // Pop array and index
const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
push(ld);
@@ -60,7 +60,7 @@
Node* adr = array_addressing(elem_type, 1);
if (stopped()) return; // guaranteed null or range check
Node* val = pop();
- _sp -= 2; // Pop array and index
+ dec_sp(2); // Pop array and index
const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
store_to_memory(control(), adr, val, elem_type, adr_type);
}
@@ -73,7 +73,7 @@
Node *ary = peek(1+vals); // in case of exception
// Null check the array base, with correct stack contents
- ary = do_null_check(ary, T_ARRAY);
+ ary = null_check(ary, T_ARRAY);
// Compile-time detect of null-exception?
if (stopped()) return top();
@@ -681,7 +681,7 @@
void Parse::do_irem() {
// Must keep both values on the expression-stack during null-check
- do_null_check(peek(), T_INT);
+ zero_check_int(peek());
// Compile-time detect of null-exception?
if (stopped()) return;
@@ -958,7 +958,7 @@
DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
assert(argument(0) != NULL, "must exist");
assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
- _sp += bc_depth;
+ inc_sp(bc_depth);
return bc_depth;
}
@@ -1581,8 +1581,8 @@
set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
break;
- case Bytecodes::_pop: _sp -= 1; break;
- case Bytecodes::_pop2: _sp -= 2; break;
+ case Bytecodes::_pop: dec_sp(1); break;
+ case Bytecodes::_pop2: dec_sp(2); break;
case Bytecodes::_swap:
a = pop();
b = pop();
@@ -1650,7 +1650,7 @@
case Bytecodes::_arraylength: {
// Must do null-check with value on expression stack
- Node *ary = do_null_check(peek(), T_ARRAY);
+ Node *ary = null_check(peek(), T_ARRAY);
// Compile-time detect of null-exception?
if (stopped()) return;
a = pop();
@@ -1667,15 +1667,15 @@
case Bytecodes::_laload: {
a = array_addressing(T_LONG, 0);
if (stopped()) return; // guaranteed null or range check
- _sp -= 2; // Pop array and index
- push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
+ dec_sp(2); // Pop array and index
+ push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
break;
}
case Bytecodes::_daload: {
a = array_addressing(T_DOUBLE, 0);
if (stopped()) return; // guaranteed null or range check
- _sp -= 2; // Pop array and index
- push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
+ dec_sp(2); // Pop array and index
+ push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
break;
}
case Bytecodes::_bastore: array_store(T_BYTE); break;
@@ -1699,7 +1699,7 @@
a = array_addressing(T_LONG, 2);
if (stopped()) return; // guaranteed null or range check
c = pop_pair();
- _sp -= 2; // Pop array and index
+ dec_sp(2); // Pop array and index
store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
break;
}
@@ -1707,7 +1707,7 @@
a = array_addressing(T_DOUBLE, 2);
if (stopped()) return; // guaranteed null or range check
c = pop_pair();
- _sp -= 2; // Pop array and index
+ dec_sp(2); // Pop array and index
c = dstore_rounding(c);
store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
break;
@@ -1733,7 +1733,7 @@
break;
case Bytecodes::_idiv:
// Must keep both values on the expression-stack during null-check
- do_null_check(peek(), T_INT);
+ zero_check_int(peek());
// Compile-time detect of null-exception?
if (stopped()) return;
b = pop();
@@ -2041,7 +2041,7 @@
case Bytecodes::_lrem:
// Must keep both values on the expression-stack during null-check
assert(peek(0) == top(), "long word order");
- do_null_check(peek(1), T_LONG);
+ zero_check_long(peek(1));
// Compile-time detect of null-exception?
if (stopped()) return;
b = pop_pair();
@@ -2053,7 +2053,7 @@
case Bytecodes::_ldiv:
// Must keep both values on the expression-stack during null-check
assert(peek(0) == top(), "long word order");
- do_null_check(peek(1), T_LONG);
+ zero_check_long(peek(1));
// Compile-time detect of null-exception?
if (stopped()) return;
b = pop_pair();
@@ -2175,7 +2175,7 @@
case Bytecodes::_athrow:
// null exception oop throws NULL pointer exception
- do_null_check(peek(), T_OBJECT);
+ null_check(peek());
if (stopped()) return;
// Hook the thrown exception directly to subsequent handlers.
if (BailoutToInterpreterForThrows) {
--- a/hotspot/src/share/vm/opto/parse3.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/parse3.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -116,7 +116,7 @@
Node* obj;
if (is_field) {
int obj_depth = is_get ? 0 : field->type()->size();
- obj = do_null_check(peek(obj_depth), T_OBJECT);
+ obj = null_check(peek(obj_depth));
// Compile-time detect of null-exception?
if (stopped()) return;
@@ -126,11 +126,11 @@
#endif
if (is_get) {
- --_sp; // pop receiver before getting
+ (void) pop(); // pop receiver before getting
do_get_xxx(obj, field, is_field);
} else {
do_put_xxx(obj, field, is_field);
- --_sp; // pop receiver after putting
+ (void) pop(); // pop receiver after putting
}
} else {
const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
@@ -230,7 +230,7 @@
}
// If there is going to be a trap, put it at the next bytecode:
set_bci(iter().next_bci());
- do_null_assert(peek(), T_OBJECT);
+ null_assert(peek());
set_bci(iter().cur_bci()); // put it back
}
@@ -463,7 +463,7 @@
// Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
// when AllocateArray node for newarray is created.
{ PreserveReexecuteState preexecs(this);
- _sp += ndimensions;
+ inc_sp(ndimensions);
// Pass 0 as nargs since uncommon trap code does not need to restore stack.
obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
} //original reexecute and sp are set back here
@@ -492,7 +492,7 @@
// Create a java array for dimension sizes
Node* dims = NULL;
{ PreserveReexecuteState preexecs(this);
- _sp += ndimensions;
+ inc_sp(ndimensions);
Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
dims = new_array(dims_array_klass, intcon(ndimensions), 0);
--- a/hotspot/src/share/vm/opto/parseHelper.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -84,7 +84,7 @@
C->log()->identify(tp->klass()));
}
}
- do_null_assert(obj, T_OBJECT);
+ null_assert(obj);
assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
if (!stopped()) {
profile_null_checkcast();
@@ -116,7 +116,7 @@
C->log()->elem("assert_null reason='instanceof' klass='%d'",
C->log()->identify(klass));
}
- do_null_assert(peek(), T_OBJECT);
+ null_assert(peek());
assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
if (!stopped()) {
// The object is now known to be null.
@@ -139,10 +139,10 @@
// pull array from stack and check that the store is valid
void Parse::array_store_check() {
- // Shorthand access to array store elements
- Node *obj = stack(_sp-1);
- Node *idx = stack(_sp-2);
- Node *ary = stack(_sp-3);
+ // Shorthand access to array store elements without popping them.
+ Node *obj = peek(0);
+ Node *idx = peek(1);
+ Node *ary = peek(2);
if (_gvn.type(obj) == TypePtr::NULL_PTR) {
// There's never a type check on null values.
--- a/hotspot/src/share/vm/opto/phaseX.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/phaseX.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -383,6 +383,8 @@
// Identify nodes that are reachable from below, useful.
C->identify_useful_nodes(_useful);
+ // Update dead node list
+ C->update_dead_node_list(_useful);
// Remove all useless nodes from PhaseValues' recorded types
// Must be done before disconnecting nodes to preserve hash-table-invariant
@@ -1190,7 +1192,7 @@
}
}
}
-
+ C->record_dead_node(dead->_idx);
if (dead->is_macro()) {
C->remove_macro_node(dead);
}
@@ -1199,6 +1201,11 @@
continue;
}
}
+ // Constant node that has no out-edges and has only one in-edge from
+ // root is usually dead. However, sometimes reshaping walk makes
+ // it reachable by adding use edges. So, we will NOT count Con nodes
+ // as dead to be conservative about the dead node count at any
+ // given time.
}
// Aggressively kill globally dead uses
--- a/hotspot/src/share/vm/opto/postaloc.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/postaloc.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -146,7 +146,7 @@
}
}
// Disconnect control and remove precedence edges if any exist
- old->disconnect_inputs(NULL);
+ old->disconnect_inputs(NULL, C);
}
return blk_adjust;
}
@@ -513,7 +513,7 @@
b->_nodes.remove(j--); phi_dex--;
_cfg._bbs.map(phi->_idx,NULL);
phi->replace_by(u);
- phi->disconnect_inputs(NULL);
+ phi->disconnect_inputs(NULL, C);
continue;
}
// Note that if value[pidx] exists, then we merged no new values here
--- a/hotspot/src/share/vm/opto/reg_split.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/reg_split.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -747,7 +747,7 @@
if( i >= cnt ) { // Found one unique input
assert(Find_id(n) == Find_id(u), "should be the same lrg");
n->replace_by(u); // Then replace with unique input
- n->disconnect_inputs(NULL);
+ n->disconnect_inputs(NULL, C);
b->_nodes.remove(insidx);
insidx--;
b->_ihrp_index--;
--- a/hotspot/src/share/vm/opto/stringopts.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/stringopts.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -241,13 +241,13 @@
_stringopts->gvn()->transform(call);
C->gvn_replace_by(uct, call);
- uct->disconnect_inputs(NULL);
+ uct->disconnect_inputs(NULL, C);
}
}
void cleanup() {
// disconnect the hook node
- _arguments->disconnect_inputs(NULL);
+ _arguments->disconnect_inputs(NULL, _stringopts->C);
}
};
@@ -358,7 +358,7 @@
C->gvn_replace_by(mem_proj, mem);
}
C->gvn_replace_by(init, C->top());
- init->disconnect_inputs(NULL);
+ init->disconnect_inputs(NULL, C);
}
Node_List PhaseStringOpts::collect_toString_calls() {
@@ -1477,6 +1477,6 @@
kit.replace_call(sc->end(), result);
// Unhook any hook nodes
- string_sizes->disconnect_inputs(NULL);
+ string_sizes->disconnect_inputs(NULL, C);
sc->cleanup();
}
--- a/hotspot/src/share/vm/opto/type.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/opto/type.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -242,8 +242,10 @@
const TypeInt *isa_int() const; // Returns NULL if not an Int
const TypeLong *is_long() const;
const TypeLong *isa_long() const; // Returns NULL if not a Long
+ const TypeD *isa_double() const; // Returns NULL if not a Double{Top,Con,Bot}
const TypeD *is_double_constant() const; // Asserts it is a DoubleCon
const TypeD *isa_double_constant() const; // Returns NULL if not a DoubleCon
+ const TypeF *isa_float() const; // Returns NULL if not a Float{Top,Con,Bot}
const TypeF *is_float_constant() const; // Asserts it is a FloatCon
const TypeF *isa_float_constant() const; // Returns NULL if not a FloatCon
const TypeTuple *is_tuple() const; // Collection of fields, NOT a pointer
@@ -1320,24 +1322,6 @@
return ((TypeD*)this)->_d;
}
-inline const TypeF *Type::is_float_constant() const {
- assert( _base == FloatCon, "Not a Float" );
- return (TypeF*)this;
-}
-
-inline const TypeF *Type::isa_float_constant() const {
- return ( _base == FloatCon ? (TypeF*)this : NULL);
-}
-
-inline const TypeD *Type::is_double_constant() const {
- assert( _base == DoubleCon, "Not a Double" );
- return (TypeD*)this;
-}
-
-inline const TypeD *Type::isa_double_constant() const {
- return ( _base == DoubleCon ? (TypeD*)this : NULL);
-}
-
inline const TypeInt *Type::is_int() const {
assert( _base == Int, "Not an Int" );
return (TypeInt*)this;
@@ -1356,6 +1340,36 @@
return ( _base == Long ? (TypeLong*)this : NULL);
}
+inline const TypeF *Type::isa_float() const {
+ return ((_base == FloatTop ||
+ _base == FloatCon ||
+ _base == FloatBot) ? (TypeF*)this : NULL);
+}
+
+inline const TypeF *Type::is_float_constant() const {
+ assert( _base == FloatCon, "Not a Float" );
+ return (TypeF*)this;
+}
+
+inline const TypeF *Type::isa_float_constant() const {
+ return ( _base == FloatCon ? (TypeF*)this : NULL);
+}
+
+inline const TypeD *Type::isa_double() const {
+ return ((_base == DoubleTop ||
+ _base == DoubleCon ||
+ _base == DoubleBot) ? (TypeD*)this : NULL);
+}
+
+inline const TypeD *Type::is_double_constant() const {
+ assert( _base == DoubleCon, "Not a Double" );
+ return (TypeD*)this;
+}
+
+inline const TypeD *Type::isa_double_constant() const {
+ return ( _base == DoubleCon ? (TypeD*)this : NULL);
+}
+
inline const TypeTuple *Type::is_tuple() const {
assert( _base == Tuple, "Not a Tuple" );
return (TypeTuple*)this;
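
The new isa_float()/isa_double() accessors above follow the existing Type accessor idiom: is_*() asserts the exact base type while isa_*() returns NULL on a mismatch, and the new queries accept any of the Top/Con/Bot lattice points. A standalone sketch of that checked-query pattern (made-up miniature type class, not the real Type hierarchy):

    #include <cassert>
    #include <cstdio>

    // Made-up miniature type lattice demonstrating the is_*/isa_* accessor idiom.
    struct MiniType {
      enum Base { Int, FloatTop, FloatCon, FloatBot } _base;
      explicit MiniType(Base b) : _base(b) {}

      // isa_float(): NULL unless this is one of the Float{Top,Con,Bot} points.
      const MiniType* isa_float() const {
        return (_base == FloatTop || _base == FloatCon || _base == FloatBot)
               ? this : nullptr;
      }
      // is_float_constant(): asserting form, only legal on FloatCon.
      const MiniType* is_float_constant() const {
        assert(_base == FloatCon && "Not a Float");
        return this;
      }
    };

    int main() {
      MiniType i(MiniType::Int), f(MiniType::FloatCon);
      std::printf("Int isa_float: %s, FloatCon isa_float: %s\n",
                  i.isa_float() ? "non-NULL" : "NULL",
                  f.isa_float() ? "non-NULL" : "NULL");
      return 0;
    }
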
--- a/hotspot/src/share/vm/prims/forte.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/prims/forte.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -216,10 +216,7 @@
// not yet valid.
*method_p = method;
-
- // See if gc may have invalidated method since we validated frame
-
- if (!Universe::heap()->is_valid_method(method)) return false;
+ if (!method->is_valid_method()) return false;
intptr_t bcx = fr->interpreter_frame_bcx();
@@ -394,19 +391,11 @@
bool fully_decipherable = find_initial_Java_frame(thd, &top_frame, &initial_Java_frame, &method, &bci);
// The frame might not be walkable but still recovered a method
- // (e.g. an nmethod with no scope info for the pc
+ // (e.g. an nmethod with no scope info for the pc)
if (method == NULL) return;
- CollectedHeap* ch = Universe::heap();
-
- // The method is not stored GC safe so see if GC became active
- // after we entered AsyncGetCallTrace() and before we try to
- // use the Method*.
- // Yes, there is still a window after this check and before
- // we use Method* below, but we can't lock out GC so that
- // has to be an acceptable risk.
- if (!ch->is_valid_method(method)) {
+ if (!method->is_valid_method()) {
trace->num_frames = ticks_GC_active; // -2
return;
}
@@ -440,13 +429,7 @@
bci = st.bci();
method = st.method();
- // The method is not stored GC safe so see if GC became active
- // after we entered AsyncGetCallTrace() and before we try to
- // use the Method*.
- // Yes, there is still a window after this check and before
- // we use Method* below, but we can't lock out GC so that
- // has to be an acceptable risk.
- if (!ch->is_valid_method(method)) {
+ if (!method->is_valid_method()) {
// we throw away everything we've gathered in this sample since
// none of it is safe
trace->num_frames = ticks_GC_active; // -2
--- a/hotspot/src/share/vm/prims/jni.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/prims/jni.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -69,6 +69,7 @@
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
@@ -79,19 +80,15 @@
#include "utilities/histogram.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
static jint CurrentVersion = JNI_VERSION_1_6;
@@ -5044,6 +5041,9 @@
#include "gc_interface/collectedHeap.hpp"
#include "utilities/quickSort.hpp"
+#if INCLUDE_VM_STRUCTS
+#include "runtime/vmStructs.hpp"
+#endif
#define run_unit_test(unit_test_function_call) \
tty->print_cr("Running test: " #unit_test_function_call); \
@@ -5056,6 +5056,9 @@
run_unit_test(CollectedHeap::test_is_in());
run_unit_test(QuickSort::test_quick_sort());
run_unit_test(AltHashing::test_alt_hash());
+#if INCLUDE_VM_STRUCTS
+ run_unit_test(VMStructs::test());
+#endif
tty->print_cr("All internal VM tests passed");
}
}
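A hedged sketch of what the new run_unit_test line expands to; the second line of the macro body is elided in the hunk above, so the invocation shown here is an assumption:

    // Approximate expansion of run_unit_test(VMStructs::test()):
    tty->print_cr("Running test: " "VMStructs::test()");
    VMStructs::test();  // assumed: the macro's elided second line performs the call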
--- a/hotspot/src/share/vm/prims/jvmti.xml Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/prims/jvmti.xml Mon Dec 17 08:30:06 2012 -0500
@@ -2370,11 +2370,11 @@
jvmtiError err;
err = (*jvmti)->GetStackTrace(jvmti, aThread, 0, 5,
- &frames, &count);
+ frames, &count);
if (err == JVMTI_ERROR_NONE && count >= 1) {
char *methodName;
err = (*jvmti)->GetMethodName(jvmti, frames[0].method,
- &methodName, NULL);
+ &methodName, NULL, NULL);
if (err == JVMTI_ERROR_NONE) {
printf("Executing method: %s", methodName);
}
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -53,24 +53,12 @@
#include "runtime/osThread.hpp"
#include "runtime/reflectionUtils.hpp"
#include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "services/threadService.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/preserveException.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
-
#define FIXLATER 0 // REMOVE this when completed.
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -41,22 +41,11 @@
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/exceptions.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
//
// class JvmtiAgentThread
--- a/hotspot/src/share/vm/prims/unsafe.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/prims/unsafe.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -996,7 +996,7 @@
// not just a literal string. For such ldc instructions, the verifier uses the
// type Object instead of String, if the loaded constant is not in fact a String.
-static oop
+static instanceKlassHandle
Unsafe_DefineAnonymousClass_impl(JNIEnv *env,
jclass host_class, jbyteArray data, jobjectArray cp_patches_jh,
HeapWord* *temp_alloc,
@@ -1073,32 +1073,39 @@
anon_klass = instanceKlassHandle(THREAD, anonk);
}
- // let caller initialize it as needed...
-
- return anon_klass->java_mirror();
+ return anon_klass;
}
UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass(JNIEnv *env, jobject unsafe, jclass host_class, jbyteArray data, jobjectArray cp_patches_jh))
{
+ instanceKlassHandle anon_klass;
+ jobject res_jh = NULL;
+
UnsafeWrapper("Unsafe_DefineAnonymousClass");
ResourceMark rm(THREAD);
HeapWord* temp_alloc = NULL;
- jobject res_jh = NULL;
-
- { oop res_oop = Unsafe_DefineAnonymousClass_impl(env,
- host_class, data, cp_patches_jh,
+ anon_klass = Unsafe_DefineAnonymousClass_impl(env, host_class, data,
+ cp_patches_jh,
&temp_alloc, THREAD);
- if (res_oop != NULL)
- res_jh = JNIHandles::make_local(env, res_oop);
- }
+ if (anon_klass() != NULL)
+ res_jh = JNIHandles::make_local(env, anon_klass->java_mirror());
// try/finally clause:
if (temp_alloc != NULL) {
FREE_C_HEAP_ARRAY(HeapWord, temp_alloc, mtInternal);
}
+ // The anonymous class loader data has been artificially kept alive to
+ // this point. The mirror and any instances of this class have to keep
+ // it alive afterwards.
+ if (anon_klass() != NULL) {
+ anon_klass->class_loader_data()->set_keep_alive(false);
+ }
+
+ // let caller initialize it as needed...
+
return (jclass) res_jh;
}
UNSAFE_END
--- a/hotspot/src/share/vm/runtime/arguments.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -1485,14 +1485,6 @@
}
}
}
- if (UseNUMA) {
- if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
- FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
- }
- // For those collectors or operating systems (eg, Windows) that do
- // not support full UseNUMA, we will map to UseNUMAInterleaving for now
- UseNUMAInterleaving = true;
- }
}
void Arguments::set_g1_gc_flags() {
@@ -3332,6 +3324,22 @@
return JNI_OK;
}
+jint Arguments::adjust_after_os() {
+#if INCLUDE_ALTERNATE_GCS
+ if (UseParallelGC || UseParallelOldGC) {
+ if (UseNUMA) {
+ if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
+ FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+ }
+ // For those collectors or operating systems (e.g., Windows) that do
+ // not support full UseNUMA, we will map to UseNUMAInterleaving for now
+ UseNUMAInterleaving = true;
+ }
+ }
+#endif
+ return JNI_OK;
+}
+
int Arguments::PropertyList_count(SystemProperty* pl) {
int count = 0;
while(pl != NULL) {
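A hedged sketch of the startup ordering the new hook relies on (the call itself is added to Threads::create_vm() later in this changeset); error handling is trimmed and this is not a verbatim copy of create_vm():

    jint parse_result = Arguments::parse(args);          // too early to know real NUMA support
    if (parse_result != JNI_OK) return parse_result;
    jint os_init_2_result = os::init_2();                // OS-level initialization
    if (os_init_2_result != JNI_OK) return os_init_2_result;
    jint adjust_result = Arguments::adjust_after_os();   // now UseNUMA/MinHeapDeltaBytes can be fixed up
    if (adjust_result != JNI_OK) return adjust_result;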
--- a/hotspot/src/share/vm/runtime/arguments.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -409,6 +409,8 @@
public:
// Parses the arguments
static jint parse(const JavaVMInitArgs* args);
+ // Adjusts the arguments after the OS has adjusted the arguments
+ static jint adjust_after_os();
// Check for consistency in the selection of the garbage collector.
static bool check_gc_consistency();
// Check consistency or otherwise of VM argument settings
--- a/hotspot/src/share/vm/runtime/atomic.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/atomic.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -36,36 +36,8 @@
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "atomic_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "atomic_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "atomic_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "atomic_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "atomic_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "atomic_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "atomic_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "atomic_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "atomic_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "atomic_bsd_zero.inline.hpp"
-#endif
+
+#include "runtime/atomic.inline.hpp"
jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
assert(sizeof(jbyte) == 1, "assumption.");
--- a/hotspot/src/share/vm/runtime/atomic.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/atomic.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -30,60 +30,59 @@
class Atomic : AllStatic {
public:
// Atomically store to a location
- static void store (jbyte store_value, jbyte* dest);
- static void store (jshort store_value, jshort* dest);
- static void store (jint store_value, jint* dest);
- static void store (jlong store_value, jlong* dest);
- static void store_ptr(intptr_t store_value, intptr_t* dest);
- static void store_ptr(void* store_value, void* dest);
+ inline static void store (jbyte store_value, jbyte* dest);
+ inline static void store (jshort store_value, jshort* dest);
+ inline static void store (jint store_value, jint* dest);
+ inline static void store (jlong store_value, jlong* dest);
+ inline static void store_ptr(intptr_t store_value, intptr_t* dest);
+ inline static void store_ptr(void* store_value, void* dest);
- static void store (jbyte store_value, volatile jbyte* dest);
- static void store (jshort store_value, volatile jshort* dest);
- static void store (jint store_value, volatile jint* dest);
- static void store (jlong store_value, volatile jlong* dest);
- static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
- static void store_ptr(void* store_value, volatile void* dest);
+ inline static void store (jbyte store_value, volatile jbyte* dest);
+ inline static void store (jshort store_value, volatile jshort* dest);
+ inline static void store (jint store_value, volatile jint* dest);
+ inline static void store (jlong store_value, volatile jlong* dest);
+ inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
+ inline static void store_ptr(void* store_value, volatile void* dest);
- static jlong load(volatile jlong* src);
+ inline static jlong load(volatile jlong* src);
// Atomically add to a location, return updated value
- static jint add (jint add_value, volatile jint* dest);
- static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
- static void* add_ptr(intptr_t add_value, volatile void* dest);
+ inline static jint add (jint add_value, volatile jint* dest);
+ inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
+ inline static void* add_ptr(intptr_t add_value, volatile void* dest);
- static jlong add (jlong add_value, volatile jlong* dest);
+ static jlong add (jlong add_value, volatile jlong* dest);
// Atomically increment location
- static void inc (volatile jint* dest);
- static void inc_ptr(volatile intptr_t* dest);
- static void inc_ptr(volatile void* dest);
+ inline static void inc (volatile jint* dest);
+ inline static void inc_ptr(volatile intptr_t* dest);
+ inline static void inc_ptr(volatile void* dest);
// Atomically decrement a location
- static void dec (volatile jint* dest);
- static void dec_ptr(volatile intptr_t* dest);
- static void dec_ptr(volatile void* dest);
+ inline static void dec (volatile jint* dest);
+ inline static void dec_ptr(volatile intptr_t* dest);
+ inline static void dec_ptr(volatile void* dest);
// Performs atomic exchange of *dest with exchange_value. Returns old prior value of *dest.
- static jint xchg(jint exchange_value, volatile jint* dest);
- static unsigned int xchg(unsigned int exchange_value,
- volatile unsigned int* dest);
+ inline static jint xchg(jint exchange_value, volatile jint* dest);
+ static unsigned int xchg(unsigned int exchange_value, volatile unsigned int* dest);
- static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
- static void* xchg_ptr(void* exchange_value, volatile void* dest);
+ inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
+ inline static void* xchg_ptr(void* exchange_value, volatile void* dest);
// Performs atomic compare of *dest and compare_value, and exchanges *dest with exchange_value
// if the comparison succeeded. Returns prior value of *dest. Guarantees a two-way memory
// barrier across the cmpxchg. I.e., it's really a 'fence_cmpxchg_acquire'.
- static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
- static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value);
- static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value);
+ static jbyte cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
+ inline static jint cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value);
+ inline static jlong cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value);
- static unsigned int cmpxchg(unsigned int exchange_value,
- volatile unsigned int* dest,
- unsigned int compare_value);
+ static unsigned int cmpxchg(unsigned int exchange_value,
+ volatile unsigned int* dest,
+ unsigned int compare_value);
- static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
- static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
+ inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
+ inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value);
};
#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/atomic.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
+#define SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
+
+#include "runtime/atomic.hpp"
+
+// Linux
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "atomic_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_sparc
+# include "atomic_linux_sparc.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_zero
+# include "atomic_linux_zero.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "atomic_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "atomic_linux_ppc.inline.hpp"
+#endif
+
+// Solaris
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "atomic_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_sparc
+# include "atomic_solaris_sparc.inline.hpp"
+#endif
+
+// Windows
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "atomic_windows_x86.inline.hpp"
+#endif
+
+// BSD
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "atomic_bsd_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_zero
+# include "atomic_bsd_zero.inline.hpp"
+#endif
+
+#endif // SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
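A hedged consumer-side sketch: .cpp files that need the inlined definitions include this aggregator instead of selecting a platform header by hand (the helper function below is hypothetical):

    #include "runtime/atomic.inline.hpp"

    static jint bump_counter(volatile jint* counter) {
      return Atomic::add(1, counter);  // definition supplied by the platform .inline.hpp pulled in above
    }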
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -721,7 +721,7 @@
guarantee(false, "wrong number of expression stack elements during deopt");
}
VerifyOopClosure verify;
- iframe->oops_interpreted_do(&verify, &rm, false);
+ iframe->oops_interpreted_do(&verify, NULL, &rm, false);
callee_size_of_parameters = mh->size_of_parameters();
callee_max_locals = mh->max_locals();
is_top_frame = false;
@@ -1242,8 +1242,8 @@
nmethodLocker nl(fr.pc());
// Log a message
- Events::log_deopt_message(thread, "Uncommon trap %d fr.pc " INTPTR_FORMAT,
- trap_request, fr.pc());
+ Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT,
+ trap_request, fr.pc());
{
ResourceMark rm;
@@ -1274,6 +1274,11 @@
MethodData* trap_mdo =
get_method_data(thread, trap_method, create_if_missing);
+ // Log a message
+ Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d",
+ trap_reason_name(reason), trap_action_name(action), fr.pc(),
+ trap_method->name_and_sig_as_C_string(), trap_bci);
+
// Print a bunch of diagnostics, if requested.
if (TraceDeoptimization || LogCompilation) {
ResourceMark rm;
--- a/hotspot/src/share/vm/runtime/fprofiler.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/fprofiler.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,6 @@
#define SHARE_VM_RUNTIME_FPROFILER_HPP
#include "runtime/timer.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// a simple flat profiler for Java
--- a/hotspot/src/share/vm/runtime/frame.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/frame.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
@@ -879,7 +880,8 @@
}
-void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
+void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f,
+ const RegisterMap* map, bool query_oop_map_cache) {
assert(is_interpreted_frame(), "Not an interpreted frame");
assert(map != NULL, "map must be set");
Thread *thread = Thread::current();
@@ -906,6 +908,16 @@
}
// process fixed part
+ if (cld_f != NULL) {
+ // The method pointer in the frame might be the only path to the method's
+ // klass, and the klass needs to be kept alive while executing. The GCs
+ // don't trace through method pointers, so typically in similar situations
+ // the mirror or the class loader of the klass is installed as a GC root.
+ // To minimize the overhead of doing that here, we ask the GC to pass down a
+ // closure that knows how to keep klasses alive given a ClassLoaderData.
+ cld_f->do_cld(m->method_holder()->class_loader_data());
+ }
+
#if !defined(PPC) || defined(ZERO)
if (m->is_native()) {
#ifdef CC_INTERP
@@ -1108,7 +1120,7 @@
}
-void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
// simulate GC crash here to dump java thread in error report
if (CrashGCForDumpingJavaThread) {
@@ -1117,7 +1129,7 @@
}
#endif
if (is_interpreted_frame()) {
- oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
+ oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
} else if (is_entry_frame()) {
oops_entry_do(f, map);
} else if (CodeCache::contains(pc())) {
@@ -1278,7 +1290,7 @@
}
}
COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
- oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
+ oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
}
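A hedged caller-side sketch of the widened interface; the closure types exist in this code base, but the exact constructor arguments shown are an assumption, and passing NULL for the CLD closure preserves the old behavior:

    // GC root scanning (simplified):
    CLDToOopClosure cld_closure(&oop_closure);
    fr.oops_do(&oop_closure, &cld_closure, &code_closure, &reg_map);
    // Verification paths that don't care about CLDs pass NULL, as in the verify hunk above.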
--- a/hotspot/src/share/vm/runtime/frame.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/frame.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_RUNTIME_FRAME_HPP
#define SHARE_VM_RUNTIME_FRAME_HPP
-#include "asm/assembler.hpp"
#include "oops/method.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/monitorChunk.hpp"
@@ -413,19 +412,19 @@
// Oops-do's
void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
- void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
+ void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
private:
void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
// Iteration of oops
- void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
+ void oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
void oops_entry_do(OopClosure* f, const RegisterMap* map);
void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
int adjust_offset(Method* method, int index); // helper for above fn
public:
// Memory management
- void oops_do(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cf, map, true); }
+ void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
void nmethods_do(CodeBlobClosure* cf);
// RedefineClasses support for finding live interpreted methods on the stack
--- a/hotspot/src/share/vm/runtime/handles.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/handles.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -27,21 +27,18 @@
#include "oops/constantPool.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
+#include "runtime/thread.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
#ifdef ASSERT
--- a/hotspot/src/share/vm/runtime/handles.inline.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/handles.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,7 @@
#define SHARE_VM_RUNTIME_HANDLES_INLINE_HPP
#include "runtime/handles.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// these inline functions are in a separate file to break an include cycle
// between Thread and Handle
--- a/hotspot/src/share/vm/runtime/interfaceSupport.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,22 +31,11 @@
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
--- a/hotspot/src/share/vm/runtime/java.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/java.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -54,6 +54,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/task.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memReporter.hpp"
@@ -79,18 +80,6 @@
#ifdef TARGET_ARCH_ppc
# include "vm_version_ppc.hpp"
#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -39,18 +39,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// -----------------------------------------------------
// Implementation of JavaCallWrapper
--- a/hotspot/src/share/vm/runtime/javaCalls.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/javaCalls.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,6 +29,7 @@
#include "oops/method.hpp"
#include "runtime/handles.hpp"
#include "runtime/javaFrameAnchor.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "jniTypes_x86.hpp"
@@ -45,18 +46,6 @@
#ifdef TARGET_ARCH_ppc
# include "jniTypes_ppc.hpp"
#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// A JavaCallWrapper is constructed before each JavaCall and destructed after the call.
// Its purpose is to allocate/deallocate a new handle block and to save/restore the last
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -28,18 +28,7 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
JNIHandleBlock* JNIHandles::_global_handles = NULL;
--- a/hotspot/src/share/vm/runtime/memprofiler.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/memprofiler.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -35,19 +35,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef PRODUCT
--- a/hotspot/src/share/vm/runtime/mutex.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/mutex.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,22 +26,19 @@
#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,20 +25,9 @@
#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/vmThread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// Mutexes used in the VM (see comment in mutexLocker.hpp):
//
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -34,25 +34,21 @@
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
#if defined(__GNUC__) && !defined(IA64)
--- a/hotspot/src/share/vm/runtime/os.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/os.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -44,6 +44,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/threadService.hpp"
@@ -51,19 +52,15 @@
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
# include <signal.h>
--- a/hotspot/src/share/vm/runtime/safepoint.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -48,6 +48,7 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/synchronizer.hpp"
+#include "runtime/thread.inline.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
@@ -71,18 +72,6 @@
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/concurrentGCThread.hpp"
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,6 +31,7 @@
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
+#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/gcLocker.inline.hpp"
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,25 +23,13 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/codeCache.hpp"
#include "compiler/disassembler.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/stubCodeGenerator.hpp"
-#ifdef TARGET_ARCH_x86
-# include "assembler_x86.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "assembler_sparc.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "assembler_zero.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
-#endif
// Implementation of StubCodeDesc
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -36,24 +36,21 @@
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
#if defined(__GNUC__) && !defined(IA64)
--- a/hotspot/src/share/vm/runtime/task.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/task.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,22 +26,19 @@
#include "memory/allocation.hpp"
#include "runtime/init.hpp"
#include "runtime/task.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
int PeriodicTask::_num_tasks = 0;
--- a/hotspot/src/share/vm/runtime/thread.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/thread.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -65,6 +65,7 @@
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/vframe.hpp"
@@ -83,19 +84,15 @@
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
@@ -826,7 +823,7 @@
return false;
}
-void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+void Thread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
active_handles()->oops_do(f);
// Do oop for ThreadShadow
f->do_oop((oop*)&_pending_exception);
@@ -2705,7 +2702,7 @@
}
};
-void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+void JavaThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
// Verify that the deferred card marks have been flushed.
assert(deferred_card_mark().is_empty(), "Should be empty during GC");
@@ -2713,7 +2710,7 @@
// since there may be more than one thread using each ThreadProfiler.
// Traverse the GCHandles
- Thread::oops_do(f, cf);
+ Thread::oops_do(f, cld_f, cf);
assert( (!has_last_Java_frame() && java_call_counter() == 0) ||
(has_last_Java_frame() && java_call_counter() > 0), "wrong java_sp info!");
@@ -2741,7 +2738,7 @@
// Traverse the execution stack
for(StackFrameStream fst(this); !fst.is_done(); fst.next()) {
- fst.current()->oops_do(f, cf, fst.register_map());
+ fst.current()->oops_do(f, cld_f, cf, fst.register_map());
}
}
@@ -2875,7 +2872,7 @@
void JavaThread::verify() {
// Verify oops in the thread.
- oops_do(&VerifyOopClosure::verify_oop, NULL);
+ oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
// Verify the stack frames.
frames_do(frame_verify);
@@ -3125,7 +3122,7 @@
static void oops_print(frame* f, const RegisterMap *map) {
PrintAndVerifyOopClosure print;
f->print_value();
- f->oops_do(&print, NULL, (RegisterMap*)map);
+ f->oops_do(&print, NULL, NULL, (RegisterMap*)map);
}
// Print out all the locations that contain oops and whether they are
@@ -3227,8 +3224,8 @@
#endif
}
-void CompilerThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
- JavaThread::oops_do(f, cf);
+void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+ JavaThread::oops_do(f, cld_f, cf);
if (_scanned_nmethod != NULL && cf != NULL) {
// Safepoints can occur when the sweeper is scanning an nmethod so
// process it here to make sure it isn't unloaded in the middle of
@@ -3334,6 +3331,9 @@
jint os_init_2_result = os::init_2();
if (os_init_2_result != JNI_OK) return os_init_2_result;
+ jint adjust_after_os_result = Arguments::adjust_after_os();
+ if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
+
// initialize TLS
ThreadLocalStorage::init();
@@ -3667,7 +3667,7 @@
}
// initialize compiler(s)
-#if defined(COMPILER1) || defined(COMPILER2)
+#if defined(COMPILER1) || defined(COMPILER2) || defined(SHARK)
CompileBroker::compilation_init();
#endif
@@ -4198,14 +4198,14 @@
// uses the Threads_lock to guarantee this property. It also makes sure that
// all threads get blocked when exiting or starting).
-void Threads::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
ALL_JAVA_THREADS(p) {
- p->oops_do(f, cf);
+ p->oops_do(f, cld_f, cf);
}
- VMThread::vm_thread()->oops_do(f, cf);
+ VMThread::vm_thread()->oops_do(f, cld_f, cf);
}
-void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
+void Threads::possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
// Introduce a mechanism allowing parallel threads to claim threads as
// root groups. Overhead should be small enough to use all the time,
// even in sequential code.
@@ -4222,12 +4222,12 @@
int cp = SharedHeap::heap()->strong_roots_parity();
ALL_JAVA_THREADS(p) {
if (p->claim_oops_do(is_par, cp)) {
- p->oops_do(f, cf);
+ p->oops_do(f, cld_f, cf);
}
}
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(is_par, cp)) {
- vmt->oops_do(f, cf);
+ vmt->oops_do(f, cld_f, cf);
}
}
--- a/hotspot/src/share/vm/runtime/thread.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/thread.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -480,8 +480,10 @@
// GC support
// Apply "f->do_oop" to all root oops in "this".
+ // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
+ // Used by JavaThread::oops_do.
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
- virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);
+ virtual void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
// Handles the parallel case for the method below.
private:
@@ -1405,7 +1407,7 @@
void frames_do(void f(frame*, const RegisterMap*));
// Memory operations
- void oops_do(OopClosure* f, CodeBlobClosure* cf);
+ void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
// Sweeper operations
void nmethods_do(CodeBlobClosure* cf);
@@ -1825,7 +1827,7 @@
// GC support
// Apply "f->do_oop" to all root oops in "this".
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
- void oops_do(OopClosure* f, CodeBlobClosure* cf);
+ void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
#ifndef PRODUCT
private:
@@ -1892,9 +1894,9 @@
// Apply "f->do_oop" to all root oops in all threads.
// This version may only be called by sequential code.
- static void oops_do(OopClosure* f, CodeBlobClosure* cf);
+ static void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
// This version may be called by sequential or parallel code.
- static void possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf);
+ static void possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
// This creates a list of GCTasks, one per thread.
static void create_thread_roots_tasks(GCTaskQueue* q);
// This creates a list of GCTasks, one per thread, for marking objects.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/thread.inline.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP
+#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
+
+#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+
+#include "runtime/thread.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "thread_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "thread_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "thread_bsd.inline.hpp"
+#endif
+
+#undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
+
+#endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
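A hedged reading of the _SCOPE define/undef pair above: it presumably lets the platform headers enforce that they are only reached through this aggregator. The guard below is an assumption about thread_<os>.inline.hpp and is not part of this changeset:

    #ifndef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
    #error "This file should only be included via thread.inline.hpp"
    #endif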
--- a/hotspot/src/share/vm/runtime/threadLocalStorage.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/threadLocalStorage.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -23,22 +23,19 @@
*/
#include "precompiled.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadLocalStorage.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
// static member initialization
--- a/hotspot/src/share/vm/runtime/timer.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/timer.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -210,8 +210,9 @@
} else {
_logfile->print("[Error in TraceCPUTime]");
}
- if (_print_cr) {
+ if (_print_cr) {
_logfile->print_cr("");
}
+ _logfile->flush();
}
}
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -95,6 +95,7 @@
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "runtime/vmStructs.hpp"
#include "utilities/array.hpp"
@@ -115,18 +116,6 @@
#ifdef TARGET_ARCH_ppc
# include "vmStructs_ppc.hpp"
#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef TARGET_OS_ARCH_linux_x86
# include "vmStructs_linux_x86.hpp"
#endif
@@ -366,7 +355,6 @@
nonstatic_field(Method, _access_flags, AccessFlags) \
nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _method_size, u2) \
- nonstatic_field(Method, _max_stack, u2) \
nonstatic_field(Method, _max_locals, u2) \
nonstatic_field(Method, _size_of_parameters, u2) \
nonstatic_field(Method, _interpreter_throwout_count, u2) \
@@ -389,7 +377,7 @@
nonstatic_field(ConstMethod, _name_index, u2) \
nonstatic_field(ConstMethod, _signature_index, u2) \
nonstatic_field(ConstMethod, _method_idnum, u2) \
- nonstatic_field(ConstMethod, _generic_signature_index, u2) \
+ nonstatic_field(ConstMethod, _max_stack, u2) \
nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \
nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \
volatile_nonstatic_field(Symbol, _refcount, int) \
@@ -2107,8 +2095,7 @@
declare_toplevel_type(FreeList<Metablock>*) \
declare_toplevel_type(FreeList<Metablock>) \
declare_toplevel_type(MetablockTreeDictionary*) \
- declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>) \
- declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
+ declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
/* NOTE that we do not use the last_entry() macro here; it is used */
@@ -2292,6 +2279,7 @@
declare_constant(ConstMethod::_has_checked_exceptions) \
declare_constant(ConstMethod::_has_localvariable_table) \
declare_constant(ConstMethod::_has_exception_table) \
+ declare_constant(ConstMethod::_has_generic_signature) \
\
/*************************************/ \
/* InstanceKlass enum */ \
@@ -3215,3 +3203,17 @@
void vmStructs_init() {
debug_only(VMStructs::init());
}
+
+#ifndef PRODUCT
+void VMStructs::test() {
+ // Check for duplicate entries in type array
+ for (int i = 0; localHotSpotVMTypes[i].typeName != NULL; i++) {
+ for (int j = i + 1; localHotSpotVMTypes[j].typeName != NULL; j++) {
+ if (strcmp(localHotSpotVMTypes[i].typeName, localHotSpotVMTypes[j].typeName) == 0) {
+ tty->print_cr("Duplicate entries for '%s'", localHotSpotVMTypes[i].typeName);
+ assert(false, "Duplicate types in localHotSpotVMTypes array");
+ }
+ }
+ }
+}
+#endif
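A standalone analog of the duplicate check added above (my illustration, not VM code): an O(n^2) pairwise strcmp over a NULL-terminated entry table.

    #include <cstdio>
    #include <cstring>

    struct VMTypeEntryLike { const char* typeName; };

    int main() {
      VMTypeEntryLike types[] = { {"Method"}, {"ConstMethod"}, {"Method"}, {NULL} };
      for (int i = 0; types[i].typeName != NULL; i++) {
        for (int j = i + 1; types[j].typeName != NULL; j++) {
          if (strcmp(types[i].typeName, types[j].typeName) == 0) {
            printf("Duplicate entries for '%s'\n", types[i].typeName);
          }
        }
      }
      return 0;
    }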
--- a/hotspot/src/share/vm/runtime/vmStructs.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/vmStructs.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -123,6 +123,11 @@
// the data structure (debug build only)
static void init();
+#ifndef PRODUCT
+ // Execute unit tests
+ static void test();
+#endif
+
private:
// Look up a type in localHotSpotVMTypes using strcmp() (debug build only).
// Returns 1 if found, 0 if not.
--- a/hotspot/src/share/vm/runtime/vmThread.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -31,24 +31,13 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifndef USDT2
HS_DTRACE_PROBE_DECL3(hotspot, vmops__request, char *, uintptr_t, int);
@@ -668,8 +657,8 @@
}
-void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
- Thread::oops_do(f, cf);
+void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+ Thread::oops_do(f, cld_f, cf);
_vm_queue->oops_do(f);
}
@@ -701,5 +690,5 @@
#endif
void VMThread::verify() {
- oops_do(&VerifyOopClosure::verify_oop, NULL);
+ oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
}
--- a/hotspot/src/share/vm/runtime/vmThread.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/vmThread.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,19 +26,8 @@
#define SHARE_VM_RUNTIME_VMTHREAD_HPP
#include "runtime/perfData.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
//
// Prioritized queue of VM operations.
@@ -137,7 +126,7 @@
static VMThread* vm_thread() { return _vm_thread; }
// GC support
- void oops_do(OopClosure* f, CodeBlobClosure* cf);
+ void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
// Debugging
void print_on(outputStream* st) const;
--- a/hotspot/src/share/vm/runtime/vm_operations.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -34,20 +34,9 @@
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sweeper.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/threadService.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#define VM_OP_NAME_INITIALIZE(name) #name,
--- a/hotspot/src/share/vm/runtime/vm_version.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/runtime/vm_version.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -243,19 +243,21 @@
#ifndef FLOAT_ARCH
#if defined(__SOFTFP__)
- #define FLOAT_ARCH "-sflt"
+ #define FLOAT_ARCH_STR "-sflt"
#elif defined(E500V2)
- #define FLOAT_ARCH "-e500v2"
+ #define FLOAT_ARCH_STR "-e500v2"
#elif defined(ARM)
- #define FLOAT_ARCH "-vfp"
+ #define FLOAT_ARCH_STR "-vfp"
#elif defined(PPC)
- #define FLOAT_ARCH "-hflt"
+ #define FLOAT_ARCH_STR "-hflt"
#else
- #define FLOAT_ARCH ""
+ #define FLOAT_ARCH_STR ""
#endif
+ #else
+ #define FLOAT_ARCH_STR XSTR(FLOAT_ARCH)
#endif
- return VMNAME " (" VM_RELEASE ") for " OS "-" CPU FLOAT_ARCH
+ return VMNAME " (" VM_RELEASE ") for " OS "-" CPU FLOAT_ARCH_STR
" JRE (" JRE_RELEASE_VERSION "), built on " __DATE__ " " __TIME__
" by " XSTR(HOTSPOT_BUILD_USER) " with " HOTSPOT_BUILD_COMPILER;
}
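A standalone sketch (my illustration) of the two-level stringification the new #else branch relies on when FLOAT_ARCH is supplied externally, for example via -DFLOAT_ARCH=-vfp; the local STR/XSTR definitions stand in for the VM's macros:

    #include <cstdio>
    #define STR(x)  #x
    #define XSTR(x) STR(x)
    #define FLOAT_ARCH -vfp     // stands in for a -D on the compiler command line
    int main() {
      printf("%s\n", XSTR(FLOAT_ARCH));  // prints "-vfp"
      return 0;
    }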
--- a/hotspot/src/share/vm/services/memSnapshot.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/services/memSnapshot.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -261,17 +261,19 @@
VMMemRegion* cur = (VMMemRegion*)current();
assert(cur->is_reserved_region() && cur->contains_region(rec),
"Sanity check");
+ if (rec->is_same_region(cur)) {
+ // release whole reserved region
#ifdef ASSERT
- VMMemRegion* next_reg = (VMMemRegion*)peek_next();
- // should not have any committed memory in this reserved region
- assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
+ VMMemRegion* next_region = (VMMemRegion*)peek_next();
+ // should not have any committed memory in this reserved region
+ assert(next_region == NULL || !next_region->is_committed_region(), "Sanity check");
#endif
- if (rec->is_same_region(cur)) {
remove();
} else if (rec->addr() == cur->addr() ||
rec->addr() + rec->size() == cur->addr() + cur->size()) {
// released region is at either end of this region
cur->exclude_region(rec->addr(), rec->size());
+ assert(check_reserved_region(), "Integrity check");
} else { // split the reserved region and release the middle
address high_addr = cur->addr() + cur->size();
size_t sz = high_addr - rec->addr();
@@ -280,10 +282,14 @@
if (MemTracker::track_callsite()) {
MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
((VMMemRegionEx*)cur)->pc());
- return insert_reserved_region(&tmp);
+ bool ret = insert_reserved_region(&tmp);
+ assert(!ret || check_reserved_region(), "Integrity check");
+ return ret;
} else {
MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
- return insert_reserved_region(&tmp);
+ bool ret = insert_reserved_region(&tmp);
+ assert(!ret || check_reserved_region(), "Integrity check");
+ return ret;
}
}
return true;
--- a/hotspot/src/share/vm/services/memSnapshot.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/services/memSnapshot.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -146,6 +146,23 @@
// reset current position
inline void reset() { _pos = 0; }
#ifdef ASSERT
+ // check integrity of records on current reserved memory region.
+ bool check_reserved_region() {
+ VMMemRegion* reserved_region = (VMMemRegion*)current();
+ assert(reserved_region != NULL && reserved_region->is_reserved_region(),
+ "Sanity check");
+ // All committed regions that follow the current reserved region should
+ // belong to that reserved region.
+ VMMemRegion* next_region = (VMMemRegion*)next();
+ for (; next_region != NULL && next_region->is_committed_region();
+ next_region = (VMMemRegion*)next() ) {
+ if (!reserved_region->contains_region(next_region)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
virtual bool is_dup_pointer(const MemPointer* ptr1,
const MemPointer* ptr2) const {
VMMemRegion* p1 = (VMMemRegion*)ptr1;
--- a/hotspot/src/share/vm/services/memTracker.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/services/memTracker.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -69,15 +69,12 @@
void MemTracker::init_tracking_options(const char* option_line) {
_tracking_level = NMT_off;
- if (strncmp(option_line, "=summary", 8) == 0) {
+ if (strcmp(option_line, "=summary") == 0) {
_tracking_level = NMT_summary;
- } else if (strncmp(option_line, "=detail", 7) == 0) {
+ } else if (strcmp(option_line, "=detail") == 0) {
_tracking_level = NMT_detail;
- } else {
- char msg[255];
- //+1 to remove the '=' character
- jio_snprintf(msg, 255, "Unknown option given to XX:NativeMemoryTracking: %s", option_line+1);
- vm_exit_during_initialization(msg, NULL);
+ } else if (strcmp(option_line, "=off") != 0) {
+ vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
}
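
The strncmp-to-strcmp change above makes the value of -XX:NativeMemoryTracking an exact match, so trailing garbage such as "=summaryfoo" is now rejected with the new syntax-error message instead of silently enabling summary tracking. The following standalone sketch mirrors that parsing logic; the parse_nmt_option name and the bool return convention are invented here for illustration and are not part of the HotSpot sources.

#include <cstdio>
#include <cstring>

enum NMTLevel { NMT_off, NMT_summary, NMT_detail };

// Sketch of the exact-match option parsing used by MemTracker above.
// strcmp() requires the whole value to match, so "=summaryfoo" reaches the
// final branch and is reported as a syntax error.
static bool parse_nmt_option(const char* option_line, NMTLevel* level) {
  *level = NMT_off;
  if (strcmp(option_line, "=summary") == 0) {
    *level = NMT_summary;
  } else if (strcmp(option_line, "=detail") == 0) {
    *level = NMT_detail;
  } else if (strcmp(option_line, "=off") != 0) {
    fprintf(stderr,
            "Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]\n");
    return false;
  }
  return true;
}
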
--- a/hotspot/src/share/vm/services/memTracker.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/services/memTracker.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -109,10 +109,6 @@
#include "services/memSnapshot.hpp"
#include "services/memTrackWorker.hpp"
-#ifdef SOLARIS
-#include "thread_solaris.inline.hpp"
-#endif
-
extern bool NMT_track_callsite;
#ifdef ASSERT
--- a/hotspot/src/share/vm/shark/llvmHeaders.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/llvmHeaders.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -35,6 +35,7 @@
#undef DEBUG
#endif
+#include <llvm/Analysis/Verifier.h>
#include <llvm/Argument.h>
#include <llvm/Constants.h>
#include <llvm/DerivedTypes.h>
@@ -42,29 +43,21 @@
#include <llvm/Instructions.h>
#include <llvm/LLVMContext.h>
#include <llvm/Module.h>
-#if SHARK_LLVM_VERSION < 27
-#include <llvm/ModuleProvider.h>
-#endif
+#if SHARK_LLVM_VERSION <= 31
#include <llvm/Support/IRBuilder.h>
-#if SHARK_LLVM_VERSION >= 29
+#else
+#include <llvm/IRBuilder.h>
+#endif
#include <llvm/Support/Threading.h>
-#else
-#include <llvm/System/Threading.h>
-#endif
-#include <llvm/Target/TargetSelect.h>
+#include <llvm/Support/TargetSelect.h>
#include <llvm/Type.h>
#include <llvm/ExecutionEngine/JITMemoryManager.h>
#include <llvm/Support/CommandLine.h>
-#if SHARK_LLVM_VERSION >= 27
+#include <llvm/ExecutionEngine/MCJIT.h>
#include <llvm/ExecutionEngine/JIT.h>
#include <llvm/ADT/StringMap.h>
#include <llvm/Support/Debug.h>
-#if SHARK_LLVM_VERSION >= 29
#include <llvm/Support/Host.h>
-#else
-#include <llvm/System/Host.h>
-#endif
-#endif
#include <map>
--- a/hotspot/src/share/vm/shark/llvmValue.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/llvmValue.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -56,6 +56,10 @@
{
return llvm::ConstantPointerNull::get(SharkType::oop_type());
}
+ static llvm::ConstantPointerNull* nullKlass()
+ {
+ return llvm::ConstantPointerNull::get(SharkType::klass_type());
+ }
public:
static llvm::ConstantInt* bit_constant(int value)
--- a/hotspot/src/share/vm/shark/sharkBlock.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkBlock.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -170,10 +170,12 @@
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
- case Bytecodes::_ldc2_w:
- push(SharkConstant::for_ldc(iter())->value(builder()));
+ case Bytecodes::_ldc2_w: {
+ SharkConstant* constant = SharkConstant::for_ldc(iter());
+ assert(constant->is_loaded(), "trap should handle unloaded classes");
+ push(constant->value(builder()));
break;
-
+ }
case Bytecodes::_iload_0:
case Bytecodes::_lload_0:
case Bytecodes::_fload_0:
@@ -1000,9 +1002,9 @@
builder()->SetInsertPoint(done);
PHINode *result;
if (is_long)
- result = builder()->CreatePHI(SharkType::jlong_type(), "result");
+ result = builder()->CreatePHI(SharkType::jlong_type(), 0, "result");
else
- result = builder()->CreatePHI(SharkType::jint_type(), "result");
+ result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
result->addIncoming(special_result, special_case);
result->addIncoming(general_result, general_case);
@@ -1036,12 +1038,12 @@
value = constant->value(builder());
}
if (!is_get || value == NULL) {
- if (!is_field)
- object = builder()->CreateInlineOop(field->holder());
-
+ if (!is_field) {
+ object = builder()->CreateInlineOop(field->holder()->java_mirror());
+ }
BasicType basic_type = field->type()->basic_type();
- const Type *stack_type = SharkType::to_stackType(basic_type);
- const Type *field_type = SharkType::to_arrayType(basic_type);
+ Type *stack_type = SharkType::to_stackType(basic_type);
+ Type *field_type = SharkType::to_arrayType(basic_type);
Value *addr = builder()->CreateAddressOfStructEntry(
object, in_ByteSize(field->offset_in_bytes()),
@@ -1050,8 +1052,12 @@
// Do the access
if (is_get) {
- Value *field_value = builder()->CreateLoad(addr);
-
+ Value* field_value;
+ if (field->is_volatile()) {
+ field_value = builder()->CreateAtomicLoad(addr);
+ } else {
+ field_value = builder()->CreateLoad(addr);
+ }
if (field_type != stack_type) {
field_value = builder()->CreateIntCast(
field_value, stack_type, basic_type != T_CHAR);
@@ -1067,13 +1073,15 @@
field_value, field_type, basic_type != T_CHAR);
}
- builder()->CreateStore(field_value, addr);
+ if (field->is_volatile()) {
+ builder()->CreateAtomicStore(field_value, addr);
+ } else {
+ builder()->CreateStore(field_value, addr);
+ }
- if (!field->type()->is_primitive_type())
+ if (!field->type()->is_primitive_type()) {
builder()->CreateUpdateBarrierSet(oopDesc::bs(), addr);
-
- if (field->is_volatile())
- builder()->CreateMemoryBarrier(SharkBuilder::BARRIER_STORELOAD);
+ }
}
}
@@ -1105,7 +1113,7 @@
builder()->CreateBr(done);
builder()->SetInsertPoint(done);
- PHINode *result = builder()->CreatePHI(SharkType::jint_type(), "result");
+ PHINode *result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
result->addIncoming(LLVMValue::jint_constant(-1), lt);
result->addIncoming(LLVMValue::jint_constant(0), eq);
result->addIncoming(LLVMValue::jint_constant(1), gt);
@@ -1152,7 +1160,7 @@
builder()->CreateBr(done);
builder()->SetInsertPoint(done);
- PHINode *result = builder()->CreatePHI(SharkType::jint_type(), "result");
+ PHINode *result = builder()->CreatePHI(SharkType::jint_type(), 0, "result");
result->addIncoming(LLVMValue::jint_constant(-1), lt);
result->addIncoming(LLVMValue::jint_constant(0), eq);
result->addIncoming(LLVMValue::jint_constant(1), gt);
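
The recurring extra 0 argument in the CreatePHI calls above tracks the LLVM 3.x IRBuilder signature, which added a hint for how many incoming values to reserve; passing 0 simply skips the pre-reservation. A minimal sketch of that API, assuming the same LLVM 3.0-3.2 headers this patch targets (make_merge_phi is an illustrative helper, not Shark code):

#include <llvm/Support/IRBuilder.h>   // moves to <llvm/IRBuilder.h> in LLVM 3.2

// CreatePHI(Type*, unsigned NumReservedValues, Name): reserving 2 slots for a
// two-way merge avoids a reallocation, but 0 (as used throughout the patch)
// is equally valid.
static llvm::PHINode* make_merge_phi(llvm::IRBuilder<>& builder,
                                     llvm::Value* a, llvm::BasicBlock* from_a,
                                     llvm::Value* b, llvm::BasicBlock* from_b) {
  llvm::PHINode* phi = builder.CreatePHI(a->getType(), 2, "merge");
  phi->addIncoming(a, from_a);
  phi->addIncoming(b, from_b);
  return phi;
}
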
--- a/hotspot/src/share/vm/shark/sharkBuilder.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkBuilder.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -47,14 +47,14 @@
// Helpers for accessing structures
Value* SharkBuilder::CreateAddressOfStructEntry(Value* base,
ByteSize offset,
- const Type* type,
+ Type* type,
const char* name) {
return CreateBitCast(CreateStructGEP(base, in_bytes(offset)), type, name);
}
LoadInst* SharkBuilder::CreateValueOfStructEntry(Value* base,
ByteSize offset,
- const Type* type,
+ Type* type,
const char* name) {
return CreateLoad(
CreateAddressOfStructEntry(
@@ -71,7 +71,7 @@
}
Value* SharkBuilder::CreateArrayAddress(Value* arrayoop,
- const Type* element_type,
+ Type* element_type,
int element_bytes,
ByteSize base_offset,
Value* index,
@@ -114,7 +114,7 @@
// Helpers for creating intrinsics and external functions.
-const Type* SharkBuilder::make_type(char type, bool void_ok) {
+Type* SharkBuilder::make_type(char type, bool void_ok) {
switch (type) {
// Primitive types
case 'c':
@@ -146,6 +146,8 @@
return PointerType::getUnqual(SharkType::monitor_type());
case 'O':
return SharkType::oop_type();
+ case 'K':
+ return SharkType::klass_type();
// Miscellaneous
case 'v':
@@ -159,14 +161,14 @@
}
}
-const FunctionType* SharkBuilder::make_ftype(const char* params,
+FunctionType* SharkBuilder::make_ftype(const char* params,
const char* ret) {
- std::vector<const Type*> param_types;
+ std::vector<Type*> param_types;
for (const char* c = params; *c; c++)
param_types.push_back(make_type(*c, false));
assert(strlen(ret) == 1, "should be");
- const Type *return_type = make_type(*ret, true);
+ Type *return_type = make_type(*ret, true);
return FunctionType::get(return_type, param_types, false);
}
@@ -274,7 +276,7 @@
}
Value* SharkBuilder::is_subtype_of() {
- return make_function((address) SharkRuntime::is_subtype_of, "OO", "c");
+ return make_function((address) SharkRuntime::is_subtype_of, "KK", "c");
}
Value* SharkBuilder::current_time_millis() {
@@ -352,79 +354,14 @@
"T", "v");
}
-// Low-level non-VM calls
-
-// The ARM-specific code here is to work around unimplemented
-// atomic exchange and memory barrier intrinsics in LLVM.
-//
-// Delegating to external functions for these would normally
-// incur a speed penalty, but Linux on ARM is a special case
-// in that atomic operations on that platform are handled by
-// external functions anyway. It would be *preferable* for
-// the calls to be hidden away in LLVM, but it's not hurting
-// performance so having the calls here is acceptable.
-//
-// If you are building Shark on a platform without atomic
-// exchange and/or memory barrier intrinsics then it is only
-// acceptable to mimic this approach if your platform cannot
-// perform these operations without delegating to a function.
-
-#ifdef ARM
-static jint zero_cmpxchg_int(volatile jint *ptr, jint oldval, jint newval) {
- return Atomic::cmpxchg(newval, ptr, oldval);
-}
-#endif // ARM
-
-Value* SharkBuilder::cmpxchg_int() {
- return make_function(
-#ifdef ARM
- (address) zero_cmpxchg_int,
-#else
- "llvm.atomic.cmp.swap.i32.p0i32",
-#endif // ARM
- "Iii", "i");
-}
-
-#ifdef ARM
-static intptr_t zero_cmpxchg_ptr(volatile intptr_t* ptr,
- intptr_t oldval,
- intptr_t newval) {
- return Atomic::cmpxchg_ptr(newval, ptr, oldval);
-}
-#endif // ARM
-
-Value* SharkBuilder::cmpxchg_ptr() {
- return make_function(
-#ifdef ARM
- (address) zero_cmpxchg_ptr,
-#else
- "llvm.atomic.cmp.swap.i" LP64_ONLY("64") NOT_LP64("32") ".p0i" LP64_ONLY("64") NOT_LP64("32"),
-#endif // ARM
- "Xxx", "x");
-}
-
Value* SharkBuilder::frame_address() {
return make_function("llvm.frameaddress", "i", "C");
}
-Value* SharkBuilder::memory_barrier() {
- return make_function(
-#ifdef ARM
- (address) 0xffff0fa0, // __kernel_dmb
-#else
- "llvm.memory.barrier",
-#endif // ARM
- "11111", "v");
-}
-
Value* SharkBuilder::memset() {
-#if SHARK_LLVM_VERSION >= 28
// LLVM 2.8 added a fifth isVolatile field for memset
// introduced with LLVM r100304
- return make_function("llvm.memset.i32", "Cciii", "v");
-#else
- return make_function("llvm.memset.i32", "Ccii", "v");
-#endif
+ return make_function("llvm.memset.p0i8.i32", "Cciii", "v");
}
Value* SharkBuilder::unimplemented() {
@@ -441,43 +378,16 @@
// Public interface to low-level non-VM calls
-CallInst* SharkBuilder::CreateCmpxchgInt(Value* exchange_value,
- Value* dst,
- Value* compare_value) {
- return CreateCall3(cmpxchg_int(), dst, compare_value, exchange_value);
-}
-
-CallInst* SharkBuilder::CreateCmpxchgPtr(Value* exchange_value,
- Value* dst,
- Value* compare_value) {
- return CreateCall3(cmpxchg_ptr(), dst, compare_value, exchange_value);
-}
-
CallInst* SharkBuilder::CreateGetFrameAddress() {
return CreateCall(frame_address(), LLVMValue::jint_constant(0));
}
-CallInst *SharkBuilder::CreateMemoryBarrier(int flags) {
- Value *args[] = {
- LLVMValue::bit_constant((flags & BARRIER_LOADLOAD) ? 1 : 0),
- LLVMValue::bit_constant((flags & BARRIER_LOADSTORE) ? 1 : 0),
- LLVMValue::bit_constant((flags & BARRIER_STORELOAD) ? 1 : 0),
- LLVMValue::bit_constant((flags & BARRIER_STORESTORE) ? 1 : 0),
- LLVMValue::bit_constant(1)};
-
- return CreateCall(memory_barrier(), args, args + 5);
-}
-
CallInst* SharkBuilder::CreateMemset(Value* dst,
Value* value,
Value* len,
Value* align) {
-#if SHARK_LLVM_VERSION >= 28
return CreateCall5(memset(), dst, value, len, align,
LLVMValue::jint_constant(0));
-#else
- return CreateCall4(memset(), dst, value, len, align);
-#endif
}
CallInst* SharkBuilder::CreateUnimplemented(const char* file, int line) {
@@ -510,11 +420,7 @@
if (isa<PointerType>(value->getType()))
value = CreatePtrToInt(value, SharkType::intptr_type());
else if (value->getType()->
-#if SHARK_LLVM_VERSION >= 27
isIntegerTy()
-#else
- isInteger()
-#endif
)
value = CreateIntCast(value, SharkType::intptr_type(), false);
else
@@ -563,9 +469,19 @@
name);
}
+Value* SharkBuilder::CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name) {
+ assert(metadata != NULL, "inlined metadata must not be NULL");
+ assert(metadata->is_metadata(), "sanity check");
+ return CreateLoad(
+ CreateIntToPtr(
+ code_buffer_address(code_buffer()->inline_Metadata(metadata)),
+ PointerType::getUnqual(type)),
+ name);
+}
+
Value* SharkBuilder::CreateInlineData(void* data,
size_t size,
- const Type* type,
+ Type* type,
const char* name) {
return CreateIntToPtr(
code_buffer_address(code_buffer()->inline_data(data, size)),
@@ -600,3 +516,11 @@
return BasicBlock::Create(
SharkContext::current(), name, GetInsertBlock()->getParent(), ip);
}
+
+LoadInst* SharkBuilder::CreateAtomicLoad(Value* ptr, unsigned align, AtomicOrdering ordering, SynchronizationScope synchScope, bool isVolatile, const char* name) {
+ return Insert(new LoadInst(ptr, name, isVolatile, align, ordering, synchScope), name);
+}
+
+StoreInst* SharkBuilder::CreateAtomicStore(Value* val, Value* ptr, unsigned align, AtomicOrdering ordering, SynchronizationScope synchScope, bool isVolatile, const char* name) {
+ return Insert(new StoreInst(val, ptr, isVolatile, align, ordering, synchScope), name);
+}
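
The two helpers just added build atomic, sequentially consistent loads and stores straight from the LLVM 3.x LoadInst/StoreInst constructors; this is what lets the volatile-field paths in sharkBlock.cpp above drop the old explicit CreateMemoryBarrier call. A standalone sketch of the same construction, assuming those LLVM 3.x constructors (atomic_load and atomic_store are illustrative names, not Shark API):

#include <llvm/Instructions.h>
#include <llvm/Support/IRBuilder.h>   // moves to <llvm/IRBuilder.h> in LLVM 3.2

// Atomic, sequentially consistent load: same constructor the new
// SharkBuilder::CreateAtomicLoad uses, with the builder's Insert() placing
// the instruction at the current insertion point.
static llvm::LoadInst* atomic_load(llvm::IRBuilder<>& builder,
                                   llvm::Value* ptr, unsigned align) {
  return builder.Insert(new llvm::LoadInst(ptr, "atomic_val",
                                           /*isVolatile=*/true, align,
                                           llvm::SequentiallyConsistent,
                                           llvm::CrossThread));
}

// Matching atomic store, mirroring SharkBuilder::CreateAtomicStore.
static llvm::StoreInst* atomic_store(llvm::IRBuilder<>& builder,
                                     llvm::Value* val, llvm::Value* ptr,
                                     unsigned align) {
  return builder.Insert(new llvm::StoreInst(val, ptr,
                                            /*isVolatile=*/true, align,
                                            llvm::SequentiallyConsistent,
                                            llvm::CrossThread));
}
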
--- a/hotspot/src/share/vm/shark/sharkBuilder.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkBuilder.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -53,22 +53,37 @@
return _code_buffer;
}
+ public:
+ llvm::LoadInst* CreateAtomicLoad(llvm::Value* ptr,
+ unsigned align = HeapWordSize,
+ llvm::AtomicOrdering ordering = llvm::SequentiallyConsistent,
+ llvm::SynchronizationScope synchScope = llvm::CrossThread,
+ bool isVolatile = true,
+ const char *name = "");
+ llvm::StoreInst* CreateAtomicStore(llvm::Value *val,
+ llvm::Value *ptr,
+ unsigned align = HeapWordSize,
+ llvm::AtomicOrdering ordering = llvm::SequentiallyConsistent,
+ llvm::SynchronizationScope SynchScope = llvm::CrossThread,
+ bool isVolatile = true,
+ const char *name = "");
+
// Helpers for accessing structures.
public:
llvm::Value* CreateAddressOfStructEntry(llvm::Value* base,
ByteSize offset,
- const llvm::Type* type,
+ llvm::Type* type,
const char *name = "");
llvm::LoadInst* CreateValueOfStructEntry(llvm::Value* base,
ByteSize offset,
- const llvm::Type* type,
+ llvm::Type* type,
const char *name = "");
// Helpers for accessing arrays.
public:
llvm::LoadInst* CreateArrayLength(llvm::Value* arrayoop);
llvm::Value* CreateArrayAddress(llvm::Value* arrayoop,
- const llvm::Type* element_type,
+ llvm::Type* element_type,
int element_bytes,
ByteSize base_offset,
llvm::Value* index,
@@ -85,8 +100,8 @@
// Helpers for creating intrinsics and external functions.
private:
- static const llvm::Type* make_type(char type, bool void_ok);
- static const llvm::FunctionType* make_ftype(const char* params,
+ static llvm::Type* make_type(char type, bool void_ok);
+ static llvm::FunctionType* make_ftype(const char* params,
const char* ret);
llvm::Value* make_function(const char* name,
const char* params,
@@ -165,7 +180,6 @@
llvm::Value* cmpxchg_int();
llvm::Value* cmpxchg_ptr();
llvm::Value* frame_address();
- llvm::Value* memory_barrier();
llvm::Value* memset();
llvm::Value* unimplemented();
llvm::Value* should_not_reach_here();
@@ -173,14 +187,7 @@
// Public interface to low-level non-VM calls.
public:
- llvm::CallInst* CreateCmpxchgInt(llvm::Value* exchange_value,
- llvm::Value* dst,
- llvm::Value* compare_value);
- llvm::CallInst* CreateCmpxchgPtr(llvm::Value* exchange_value,
- llvm::Value* dst,
- llvm::Value* compare_value);
llvm::CallInst* CreateGetFrameAddress();
- llvm::CallInst* CreateMemoryBarrier(int flags);
llvm::CallInst* CreateMemset(llvm::Value* dst,
llvm::Value* value,
llvm::Value* len,
@@ -189,15 +196,6 @@
llvm::CallInst* CreateShouldNotReachHere(const char* file, int line);
NOT_PRODUCT(llvm::CallInst* CreateDump(llvm::Value* value));
- // Flags for CreateMemoryBarrier.
- public:
- enum BarrierFlags {
- BARRIER_LOADLOAD = 1,
- BARRIER_LOADSTORE = 2,
- BARRIER_STORELOAD = 4,
- BARRIER_STORESTORE = 8
- };
-
// HotSpot memory barriers
public:
void CreateUpdateBarrierSet(BarrierSet* bs, llvm::Value* field);
@@ -209,9 +207,14 @@
llvm::Value* CreateInlineOop(ciObject* object, const char* name = "") {
return CreateInlineOop(object->constant_encoding(), name);
}
+
+ llvm::Value* CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name = "");
+ llvm::Value* CreateInlineMetadata(ciMetadata* metadata, llvm::PointerType* type, const char* name = "") {
+ return CreateInlineMetadata(metadata->constant_encoding(), type, name);
+ }
llvm::Value* CreateInlineData(void* data,
size_t size,
- const llvm::Type* type,
+ llvm::Type* type,
const char* name = "");
// Helpers for creating basic blocks.
@@ -222,5 +225,4 @@
llvm::BasicBlock* CreateBlock(llvm::BasicBlock* ip,
const char* name="") const;
};
-
-#endif // SHARE_VM_SHARK_SHARKBUILDER_HPP
+#endif // SHARE_VM_SHARK_SHARKBUILDER_HPP
--- a/hotspot/src/share/vm/shark/sharkCacheDecache.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkCacheDecache.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -107,11 +107,10 @@
void SharkDecacher::process_method_slot(Value** value, int offset) {
// Decache the method pointer
write_value_to_frame(
- SharkType::Method*_type(),
+ SharkType::Method_type(),
*value,
offset);
- oopmap()->set_oop(slot2reg(offset));
}
void SharkDecacher::process_pc_slot(int offset) {
@@ -205,7 +204,7 @@
void SharkCacher::process_method_slot(Value** value, int offset) {
// Cache the method pointer
- *value = read_value_from_frame(SharkType::Method*_type(), offset);
+ *value = read_value_from_frame(SharkType::Method_type(), offset);
}
void SharkFunctionEntryCacher::process_method_slot(Value** value, int offset) {
@@ -230,7 +229,7 @@
}
Value* SharkOSREntryCacher::CreateAddressOfOSRBufEntry(int offset,
- const Type* type) {
+ Type* type) {
Value *result = builder()->CreateStructGEP(osr_buf(), offset);
if (type != SharkType::intptr_type())
result = builder()->CreateBitCast(result, PointerType::getUnqual(type));
@@ -254,12 +253,12 @@
}
}
-void SharkDecacher::write_value_to_frame(const Type* type,
+void SharkDecacher::write_value_to_frame(Type* type,
Value* value,
int offset) {
builder()->CreateStore(value, stack()->slot_addr(offset, type));
}
-Value* SharkCacher::read_value_from_frame(const Type* type, int offset) {
+Value* SharkCacher::read_value_from_frame(Type* type, int offset) {
return builder()->CreateLoad(stack()->slot_addr(offset, type));
}
--- a/hotspot/src/share/vm/shark/sharkCacheDecache.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkCacheDecache.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -192,7 +192,7 @@
// Writer helper
protected:
- void write_value_to_frame(const llvm::Type* type,
+ void write_value_to_frame(llvm::Type* type,
llvm::Value* value,
int offset);
};
@@ -321,7 +321,7 @@
// Writer helper
protected:
- llvm::Value* read_value_from_frame(const llvm::Type* type, int offset);
+ llvm::Value* read_value_from_frame(llvm::Type* type, int offset);
};
class SharkJavaCallCacher : public SharkCacher {
@@ -422,7 +422,7 @@
// Helper
private:
- llvm::Value* CreateAddressOfOSRBufEntry(int offset, const llvm::Type* type);
+ llvm::Value* CreateAddressOfOSRBufEntry(int offset, llvm::Type* type);
};
#endif // SHARE_VM_SHARK_SHARKCACHEDECACHE_HPP
--- a/hotspot/src/share/vm/shark/sharkCodeBuffer.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkCodeBuffer.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -81,6 +81,13 @@
return offset;
}
+ int inline_Metadata(Metadata* metadata) const {
+ masm()->align(BytesPerWord);
+ int offset = masm()->offset();
+ masm()->store_Metadata(metadata);
+ return offset;
+ }
+
// Inline a block of non-oop data into the buffer and return its offset.
public:
int inline_data(void *src, size_t size) const {
--- a/hotspot/src/share/vm/shark/sharkCompiler.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkCompiler.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -48,7 +48,6 @@
using namespace llvm;
-#if SHARK_LLVM_VERSION >= 27
namespace {
cl::opt<std::string>
MCPU("mcpu");
@@ -57,7 +56,6 @@
MAttrs("mattr",
cl::CommaSeparated);
}
-#endif
SharkCompiler::SharkCompiler()
: AbstractCompiler() {
@@ -72,6 +70,9 @@
// Initialize the native target
InitializeNativeTarget();
+  // MCJIT requires a native AsmPrinter
+ InitializeNativeTargetAsmPrinter();
+
// Create the two contexts which we'll use
_normal_context = new SharkContext("normal");
_native_context = new SharkContext("native");
@@ -79,7 +80,6 @@
// Create the memory manager
_memory_manager = new SharkMemoryManager();
-#if SHARK_LLVM_VERSION >= 27
// Finetune LLVM for the current host CPU.
StringMap<bool> Features;
bool gotCpuFeatures = llvm::sys::getHostCPUFeatures(Features);
@@ -113,6 +113,16 @@
builder.setJITMemoryManager(memory_manager());
builder.setEngineKind(EngineKind::JIT);
builder.setErrorStr(&ErrorMsg);
+ if (! fnmatch(SharkOptimizationLevel, "None", 0)) {
+ tty->print_cr("Shark optimization level set to: None");
+ builder.setOptLevel(llvm::CodeGenOpt::None);
+ } else if (! fnmatch(SharkOptimizationLevel, "Less", 0)) {
+ tty->print_cr("Shark optimization level set to: Less");
+ builder.setOptLevel(llvm::CodeGenOpt::Less);
+ } else if (! fnmatch(SharkOptimizationLevel, "Aggressive", 0)) {
+ tty->print_cr("Shark optimization level set to: Aggressive");
+ builder.setOptLevel(llvm::CodeGenOpt::Aggressive);
+ } // else Default is selected by, well, default :-)
_execution_engine = builder.create();
if (!execution_engine()) {
@@ -125,13 +135,6 @@
execution_engine()->addModule(
_native_context->module());
-#else
- _execution_engine = ExecutionEngine::createJIT(
- _normal_context->module_provider(),
- NULL, memory_manager(), CodeGenOpt::Default);
- execution_engine()->addModuleProvider(
- _native_context->module_provider());
-#endif
// All done
mark_initialized();
@@ -261,6 +264,12 @@
function->dump();
}
+ if (SharkVerifyFunction != NULL) {
+ if (!fnmatch(SharkVerifyFunction, name, 0)) {
+ verifyFunction(*function);
+ }
+ }
+
// Compile to native code
address code = NULL;
context()->add_function(function);
@@ -268,33 +277,28 @@
MutexLocker locker(execution_engine_lock());
free_queued_methods();
+#ifndef NDEBUG
+#if SHARK_LLVM_VERSION <= 31
+#define setCurrentDebugType SetCurrentDebugType
+#endif
if (SharkPrintAsmOf != NULL) {
-#if SHARK_LLVM_VERSION >= 27
-#ifndef NDEBUG
if (!fnmatch(SharkPrintAsmOf, name, 0)) {
- llvm::SetCurrentDebugType(X86_ONLY("x86-emitter") NOT_X86("jit"));
+ llvm::setCurrentDebugType(X86_ONLY("x86-emitter") NOT_X86("jit"));
llvm::DebugFlag = true;
}
else {
- llvm::SetCurrentDebugType("");
+ llvm::setCurrentDebugType("");
llvm::DebugFlag = false;
}
+ }
+#ifdef setCurrentDebugType
+#undef setCurrentDebugType
+#endif
#endif // !NDEBUG
-#else
- // NB you need to patch LLVM with http://tinyurl.com/yf3baln for this
- std::vector<const char*> args;
- args.push_back(""); // program name
- if (!fnmatch(SharkPrintAsmOf, name, 0))
- args.push_back("-debug-only=x86-emitter");
- else
- args.push_back("-debug-only=none");
- args.push_back(0); // terminator
- cl::ParseCommandLineOptions(args.size() - 1, (char **) &args[0]);
-#endif // SHARK_LLVM_VERSION
- }
memory_manager()->set_entry_for_function(function, entry);
code = (address) execution_engine()->getPointerToFunction(function);
}
+ assert(code != NULL, "code must be != NULL");
entry->set_entry_point(code);
entry->set_function(function);
entry->set_context(context());
@@ -319,8 +323,8 @@
// finish with the exception of the VM thread, so we can consider
// ourself the owner of the execution engine lock even though we
// can't actually acquire it at this time.
- assert(Thread::current()->is_VM_thread(), "must be called by VM thread");
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ assert(Thread::current()->is_Compiler_thread(), "must be called by compiler thread");
+ assert_locked_or_safepoint(CodeCache_lock);
SharkEntry *entry = (SharkEntry *) code;
entry->context()->push_to_free_queue(entry->function());
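
Two of the constructor changes above work together: the native AsmPrinter must be registered once the MCJIT headers are linked in, and -XX:SharkOptimizationLevel now feeds EngineBuilder::setOptLevel before the engine is created. A condensed sketch of that setup, using only the LLVM 3.x calls already visible in this file (make_engine is an illustrative wrapper, not Shark code):

#include <string>
#include <llvm/Module.h>
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/ExecutionEngine/JIT.h>
#include <llvm/Support/TargetSelect.h>

// Both initializers must run before EngineBuilder::create(); without the
// AsmPrinter registration an MCJIT-capable build cannot emit native code.
static llvm::ExecutionEngine* make_engine(llvm::Module* module,
                                          std::string* error_msg) {
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();

  llvm::EngineBuilder builder(module);
  builder.setEngineKind(llvm::EngineKind::JIT);
  builder.setErrorStr(error_msg);
  builder.setOptLevel(llvm::CodeGenOpt::Default);  // None/Less/Aggressive map the same way
  return builder.create();                         // NULL on failure, see *error_msg
}
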
--- a/hotspot/src/share/vm/shark/sharkConstant.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkConstant.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -37,10 +37,8 @@
ciType *type = NULL;
if (constant.basic_type() == T_OBJECT) {
ciEnv *env = ciEnv::current();
- if (constant.as_object()->is_klass())
- type = env->Class_klass();
- else
- type = env->String_klass();
+ assert(constant.as_object()->klass() == env->String_klass() || constant.as_object()->klass() == env->Class_klass(), "should be");
+ type = constant.as_object()->klass();
}
return new SharkConstant(constant, type);
}
@@ -108,17 +106,16 @@
// objects (which differ between ldc* and get*, thanks!)
ciObject *object = constant.as_object();
assert(type != NULL, "shouldn't be");
- if (object->is_klass()) {
- // The constant returned for a klass is the ciKlass
- // for the entry, but we want the java_mirror.
- ciKlass *klass = object->as_klass();
- if (!klass->is_loaded()) {
+
+ if ((! object->is_null_object()) && object->klass() == ciEnv::current()->Class_klass()) {
+ ciKlass *klass = object->klass();
+ if (! klass->is_loaded()) {
_is_loaded = false;
return;
}
- object = klass->java_mirror();
}
- if (object->is_null_object() || !object->can_be_constant()) {
+
+ if (object->is_null_object() || ! object->can_be_constant() || ! object->is_loaded()) {
_is_loaded = false;
return;
}
--- a/hotspot/src/share/vm/shark/sharkContext.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkContext.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -29,6 +29,7 @@
#include "shark/llvmHeaders.hpp"
#include "shark/sharkContext.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "memory/allocation.hpp"
using namespace llvm;
@@ -52,6 +53,9 @@
_itableOffsetEntry_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), itableOffsetEntry::size() * wordSize));
+ _Metadata_type = PointerType::getUnqual(
+ ArrayType::get(jbyte_type(), sizeof(Metadata)));
+
_klass_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(Klass)));
@@ -61,7 +65,7 @@
_jniHandleBlock_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(JNIHandleBlock)));
- _Method*_type = PointerType::getUnqual(
+ _Method_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(Method)));
_monitor_type = ArrayType::get(
@@ -76,14 +80,14 @@
_zeroStack_type = PointerType::getUnqual(
ArrayType::get(jbyte_type(), sizeof(ZeroStack)));
- std::vector<const Type*> params;
- params.push_back(Method*_type());
+ std::vector<Type*> params;
+ params.push_back(Method_type());
params.push_back(intptr_type());
params.push_back(thread_type());
_entry_point_type = FunctionType::get(jint_type(), params, false);
params.clear();
- params.push_back(Method*_type());
+ params.push_back(Method_type());
params.push_back(PointerType::getUnqual(jbyte_type()));
params.push_back(intptr_type());
params.push_back(thread_type());
@@ -150,7 +154,7 @@
}
}
-class SharkFreeQueueItem : public CHeapObj {
+class SharkFreeQueueItem : public CHeapObj<mtNone> {
public:
SharkFreeQueueItem(llvm::Function* function, SharkFreeQueueItem *next)
: _function(function), _next(next) {}
--- a/hotspot/src/share/vm/shark/sharkContext.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkContext.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -42,11 +42,7 @@
private:
llvm::Module* _module;
-#if SHARK_LLVM_VERSION >= 27
public:
-#else
- private:
-#endif
llvm::Module* module() const {
return _module;
}
@@ -59,127 +55,126 @@
// Module accessors
public:
-#if SHARK_LLVM_VERSION < 27
- llvm::ModuleProvider* module_provider() const {
- return new llvm::ExistingModuleProvider(module());
- }
-#endif
void add_function(llvm::Function* function) const {
module()->getFunctionList().push_back(function);
}
llvm::Constant* get_external(const char* name,
- const llvm::FunctionType* sig) {
+ llvm::FunctionType* sig) {
return module()->getOrInsertFunction(name, sig);
}
// Basic types
private:
- const llvm::Type* _void_type;
- const llvm::IntegerType* _bit_type;
- const llvm::IntegerType* _jbyte_type;
- const llvm::IntegerType* _jshort_type;
- const llvm::IntegerType* _jint_type;
- const llvm::IntegerType* _jlong_type;
- const llvm::Type* _jfloat_type;
- const llvm::Type* _jdouble_type;
+ llvm::Type* _void_type;
+ llvm::IntegerType* _bit_type;
+ llvm::IntegerType* _jbyte_type;
+ llvm::IntegerType* _jshort_type;
+ llvm::IntegerType* _jint_type;
+ llvm::IntegerType* _jlong_type;
+ llvm::Type* _jfloat_type;
+ llvm::Type* _jdouble_type;
public:
- const llvm::Type* void_type() const {
+ llvm::Type* void_type() const {
return _void_type;
}
- const llvm::IntegerType* bit_type() const {
+ llvm::IntegerType* bit_type() const {
return _bit_type;
}
- const llvm::IntegerType* jbyte_type() const {
+ llvm::IntegerType* jbyte_type() const {
return _jbyte_type;
}
- const llvm::IntegerType* jshort_type() const {
+ llvm::IntegerType* jshort_type() const {
return _jshort_type;
}
- const llvm::IntegerType* jint_type() const {
+ llvm::IntegerType* jint_type() const {
return _jint_type;
}
- const llvm::IntegerType* jlong_type() const {
+ llvm::IntegerType* jlong_type() const {
return _jlong_type;
}
- const llvm::Type* jfloat_type() const {
+ llvm::Type* jfloat_type() const {
return _jfloat_type;
}
- const llvm::Type* jdouble_type() const {
+ llvm::Type* jdouble_type() const {
return _jdouble_type;
}
- const llvm::IntegerType* intptr_type() const {
+ llvm::IntegerType* intptr_type() const {
return LP64_ONLY(jlong_type()) NOT_LP64(jint_type());
}
// Compound types
private:
- const llvm::PointerType* _itableOffsetEntry_type;
- const llvm::PointerType* _jniEnv_type;
- const llvm::PointerType* _jniHandleBlock_type;
- const llvm::PointerType* _klass_type;
- const llvm::PointerType* _Method*_type;
- const llvm::ArrayType* _monitor_type;
- const llvm::PointerType* _oop_type;
- const llvm::PointerType* _thread_type;
- const llvm::PointerType* _zeroStack_type;
- const llvm::FunctionType* _entry_point_type;
- const llvm::FunctionType* _osr_entry_point_type;
+ llvm::PointerType* _itableOffsetEntry_type;
+ llvm::PointerType* _jniEnv_type;
+ llvm::PointerType* _jniHandleBlock_type;
+ llvm::PointerType* _Metadata_type;
+ llvm::PointerType* _klass_type;
+ llvm::PointerType* _Method_type;
+ llvm::ArrayType* _monitor_type;
+ llvm::PointerType* _oop_type;
+ llvm::PointerType* _thread_type;
+ llvm::PointerType* _zeroStack_type;
+ llvm::FunctionType* _entry_point_type;
+ llvm::FunctionType* _osr_entry_point_type;
public:
- const llvm::PointerType* itableOffsetEntry_type() const {
+ llvm::PointerType* itableOffsetEntry_type() const {
return _itableOffsetEntry_type;
}
- const llvm::PointerType* jniEnv_type() const {
+ llvm::PointerType* jniEnv_type() const {
return _jniEnv_type;
}
- const llvm::PointerType* jniHandleBlock_type() const {
+ llvm::PointerType* jniHandleBlock_type() const {
return _jniHandleBlock_type;
}
- const llvm::PointerType* klass_type() const {
+ llvm::PointerType* Metadata_type() const {
+ return _Metadata_type;
+ }
+ llvm::PointerType* klass_type() const {
return _klass_type;
}
- const llvm::PointerType* Method*_type() const {
- return _Method*_type;
+ llvm::PointerType* Method_type() const {
+ return _Method_type;
}
- const llvm::ArrayType* monitor_type() const {
+ llvm::ArrayType* monitor_type() const {
return _monitor_type;
}
- const llvm::PointerType* oop_type() const {
+ llvm::PointerType* oop_type() const {
return _oop_type;
}
- const llvm::PointerType* thread_type() const {
+ llvm::PointerType* thread_type() const {
return _thread_type;
}
- const llvm::PointerType* zeroStack_type() const {
+ llvm::PointerType* zeroStack_type() const {
return _zeroStack_type;
}
- const llvm::FunctionType* entry_point_type() const {
+ llvm::FunctionType* entry_point_type() const {
return _entry_point_type;
}
- const llvm::FunctionType* osr_entry_point_type() const {
+ llvm::FunctionType* osr_entry_point_type() const {
return _osr_entry_point_type;
}
// Mappings
private:
- const llvm::Type* _to_stackType[T_CONFLICT];
- const llvm::Type* _to_arrayType[T_CONFLICT];
+ llvm::Type* _to_stackType[T_CONFLICT];
+ llvm::Type* _to_arrayType[T_CONFLICT];
private:
- const llvm::Type* map_type(const llvm::Type* const* table,
+ llvm::Type* map_type(llvm::Type* const* table,
BasicType type) const {
assert(type >= 0 && type < T_CONFLICT, "unhandled type");
- const llvm::Type* result = table[type];
+ llvm::Type* result = table[type];
assert(result != NULL, "unhandled type");
return result;
}
public:
- const llvm::Type* to_stackType(BasicType type) const {
+ llvm::Type* to_stackType(BasicType type) const {
return map_type(_to_stackType, type);
}
- const llvm::Type* to_arrayType(BasicType type) const {
+ llvm::Type* to_arrayType(BasicType type) const {
return map_type(_to_arrayType, type);
}
--- a/hotspot/src/share/vm/shark/sharkFunction.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkFunction.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -91,7 +91,7 @@
bool is_osr() const {
return flow()->is_osr_flow();
}
- const llvm::FunctionType* entry_point_type() const {
+ llvm::FunctionType* entry_point_type() const {
if (is_osr())
return SharkType::osr_entry_point_type();
else
--- a/hotspot/src/share/vm/shark/sharkIntrinsics.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkIntrinsics.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -171,7 +171,7 @@
builder()->CreateBr(done);
builder()->SetInsertPoint(done);
- PHINode *phi = builder()->CreatePHI(a->getType(), "result");
+ PHINode *phi = builder()->CreatePHI(a->getType(), 0, "result");
phi->addIncoming(a, return_a);
phi->addIncoming(b, return_b);
@@ -210,7 +210,7 @@
Value *klass = builder()->CreateValueOfStructEntry(
state()->pop()->jobject_value(),
in_ByteSize(oopDesc::klass_offset_in_bytes()),
- SharkType::oop_type(),
+ SharkType::klass_type(),
"klass");
state()->push(
@@ -265,8 +265,7 @@
"addr");
// Perform the operation
- Value *result = builder()->CreateCmpxchgInt(x, addr, e);
-
+ Value *result = builder()->CreateAtomicCmpXchg(addr, e, x, llvm::SequentiallyConsistent);
// Push the result
state()->push(
SharkValue::create_jint(
--- a/hotspot/src/share/vm/shark/sharkMemoryManager.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkMemoryManager.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -79,7 +79,6 @@
mm()->setMemoryExecutable();
}
-#if SHARK_LLVM_VERSION >= 27
void SharkMemoryManager::deallocateExceptionTable(void *ptr) {
mm()->deallocateExceptionTable(ptr);
}
@@ -87,26 +86,23 @@
void SharkMemoryManager::deallocateFunctionBody(void *ptr) {
mm()->deallocateFunctionBody(ptr);
}
-#else
-void SharkMemoryManager::deallocateMemForFunction(const Function* F) {
- return mm()->deallocateMemForFunction(F);
-}
-#endif
uint8_t* SharkMemoryManager::allocateGlobal(uintptr_t Size,
unsigned int Alignment) {
return mm()->allocateGlobal(Size, Alignment);
}
-#if SHARK_LLVM_VERSION < 27
-void* SharkMemoryManager::getDlsymTable() const {
- return mm()->getDlsymTable();
+void* SharkMemoryManager::getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure) {
+ return mm()->getPointerToNamedFunction(Name, AbortOnFailure);
}
-void SharkMemoryManager::SetDlsymTable(void *ptr) {
- mm()->SetDlsymTable(ptr);
+uint8_t* SharkMemoryManager::allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
+ return mm()->allocateCodeSection(Size, Alignment, SectionID);
}
-#endif
+
+uint8_t* SharkMemoryManager::allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
+ return mm()->allocateDataSection(Size, Alignment, SectionID);
+}
void SharkMemoryManager::setPoisonMemory(bool poison) {
mm()->setPoisonMemory(poison);
--- a/hotspot/src/share/vm/shark/sharkMemoryManager.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkMemoryManager.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -75,20 +75,15 @@
unsigned char* TableStart,
unsigned char* TableEnd,
unsigned char* FrameRegister);
-#if SHARK_LLVM_VERSION < 27
- void* getDlsymTable() const;
- void SetDlsymTable(void *ptr);
-#endif
+ void *getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure = true);
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
void setPoisonMemory(bool);
uint8_t* allocateGlobal(uintptr_t, unsigned int);
void setMemoryWritable();
void setMemoryExecutable();
-#if SHARK_LLVM_VERSION >= 27
void deallocateExceptionTable(void *ptr);
void deallocateFunctionBody(void *ptr);
-#else
- void deallocateMemForFunction(const llvm::Function* F);
-#endif
unsigned char *allocateSpace(intptr_t Size,
unsigned int Alignment);
};
--- a/hotspot/src/share/vm/shark/sharkNativeWrapper.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkNativeWrapper.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -59,7 +59,6 @@
OopMap *oopmap = new OopMap(
SharkStack::oopmap_slot_munge(stack()->oopmap_frame_size()),
SharkStack::oopmap_slot_munge(arg_size()));
- oopmap->set_oop(SharkStack::slot2reg(stack()->method_slot_offset()));
// Set up the oop_tmp slot if required:
// - For static methods we use it to handlize the class argument
@@ -83,9 +82,9 @@
}
// Start building the argument list
- std::vector<const Type*> param_types;
+ std::vector<Type*> param_types;
std::vector<Value*> param_values;
- const PointerType *box_type = PointerType::getUnqual(SharkType::oop_type());
+ PointerType *box_type = PointerType::getUnqual(SharkType::oop_type());
// First argument is the JNIEnv
param_types.push_back(SharkType::jniEnv_type());
@@ -149,7 +148,7 @@
builder()->CreateBr(merge);
builder()->SetInsertPoint(merge);
- phi = builder()->CreatePHI(box_type, "boxed_object");
+ phi = builder()->CreatePHI(box_type, 0, "boxed_object");
phi->addIncoming(ConstantPointerNull::get(box_type), null);
phi->addIncoming(box, not_null);
box = phi;
@@ -170,7 +169,7 @@
// fall through
default:
- const Type *param_type = SharkType::to_stackType(arg_type(i));
+ Type *param_type = SharkType::to_stackType(arg_type(i));
param_types.push_back(param_type);
param_values.push_back(
@@ -201,7 +200,7 @@
// Make the call
BasicType result_type = target()->result_type();
- const Type* return_type;
+ Type* return_type;
if (result_type == T_VOID)
return_type = SharkType::void_type();
else if (is_returning_oop())
@@ -213,7 +212,7 @@
PointerType::getUnqual(
FunctionType::get(return_type, param_types, false)));
Value *result = builder()->CreateCall(
- native_function, param_values.begin(), param_values.end());
+ native_function, llvm::makeArrayRef(param_values));
// Start the transition back to _thread_in_Java
CreateSetThreadState(_thread_in_native_trans);
@@ -221,7 +220,7 @@
// Make sure new state is visible in the GC thread
if (os::is_MP()) {
if (UseMembar)
- builder()->CreateMemoryBarrier(SharkBuilder::BARRIER_STORELOAD);
+ builder()->CreateFence(llvm::SequentiallyConsistent, llvm::CrossThread);
else
CreateWriteMemorySerializePage();
}
@@ -305,7 +304,7 @@
builder()->CreateBr(merge);
builder()->SetInsertPoint(merge);
- PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), "result");
+ PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), 0, "result");
phi->addIncoming(LLVMValue::null(), null);
phi->addIncoming(unboxed_result, not_null);
result = phi;
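
The UseMembar branch above now emits a genuine LLVM fence instead of calling the removed memory_barrier() wrapper; a sequentially consistent fence covers the StoreLoad ordering the old BARRIER_STORELOAD flag asked for. Minimal sketch of the call, under the same LLVM 3.x API assumption (emit_state_fence is an illustrative name):

#include <llvm/Support/IRBuilder.h>   // moves to <llvm/IRBuilder.h> in LLVM 3.2

// Full fence after publishing the new thread state, so the GC thread is
// guaranteed to observe it before the native call proceeds.
static void emit_state_fence(llvm::IRBuilder<>& builder) {
  builder.CreateFence(llvm::SequentiallyConsistent, llvm::CrossThread);
}
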
--- a/hotspot/src/share/vm/shark/sharkStack.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkStack.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -75,7 +75,7 @@
_method_slot_offset = offset++;
if (setup_sp_and_method) {
builder()->CreateStore(
- method, slot_addr(method_slot_offset(), SharkType::Method*_type()));
+ method, slot_addr(method_slot_offset(), SharkType::Method_type()));
}
// Unextended SP
@@ -163,7 +163,7 @@
}
Value* SharkStack::slot_addr(int offset,
- const Type* type,
+ Type* type,
const char* name) const {
bool needs_cast = type && type != SharkType::intptr_type();
--- a/hotspot/src/share/vm/shark/sharkStack.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkStack.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -204,7 +204,7 @@
// Addresses of things in the frame
public:
llvm::Value* slot_addr(int offset,
- const llvm::Type* type = NULL,
+ llvm::Type* type = NULL,
const char* name = "") const;
llvm::Value* monitor_addr(int index) const {
--- a/hotspot/src/share/vm/shark/sharkState.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkState.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -131,7 +131,7 @@
Value *this_method = this->method();
Value *other_method = other->method();
if (this_method != other_method) {
- PHINode *phi = builder()->CreatePHI(SharkType::Method*_type(), "method");
+ PHINode *phi = builder()->CreatePHI(SharkType::Method_type(), 0, "method");
phi->addIncoming(this_method, this_block);
phi->addIncoming(other_method, other_block);
set_method(phi);
@@ -142,7 +142,7 @@
Value *other_oop_tmp = other->oop_tmp();
if (this_oop_tmp != other_oop_tmp) {
assert(this_oop_tmp && other_oop_tmp, "can't merge NULL with non-NULL");
- PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), "oop_tmp");
+ PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), 0, "oop_tmp");
phi->addIncoming(this_oop_tmp, this_block);
phi->addIncoming(other_oop_tmp, other_block);
set_oop_tmp(phi);
@@ -243,7 +243,7 @@
Value* method,
Value* osr_buf)
: SharkState(block) {
- assert(!block->stack_depth_at_entry(), "entry block shouldn't have stack");
+ assert(block->stack_depth_at_entry() == 0, "entry block shouldn't have stack");
set_num_monitors(block->ciblock()->monitor_count());
// Local variables
@@ -287,7 +287,7 @@
char name[18];
// Method
- set_method(builder()->CreatePHI(SharkType::Method*_type(), "method"));
+ set_method(builder()->CreatePHI(SharkType::Method_type(), 0, "method"));
// Local variables
for (int i = 0; i < max_locals(); i++) {
@@ -307,7 +307,7 @@
case T_ARRAY:
snprintf(name, sizeof(name), "local_%d_", i);
value = SharkValue::create_phi(
- type, builder()->CreatePHI(SharkType::to_stackType(type), name));
+ type, builder()->CreatePHI(SharkType::to_stackType(type), 0, name));
break;
case T_ADDRESS:
@@ -345,7 +345,7 @@
case T_ARRAY:
snprintf(name, sizeof(name), "stack_%d_", i);
value = SharkValue::create_phi(
- type, builder()->CreatePHI(SharkType::to_stackType(type), name));
+ type, builder()->CreatePHI(SharkType::to_stackType(type), 0, name));
break;
case T_ADDRESS:
--- a/hotspot/src/share/vm/shark/sharkTopLevelBlock.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkTopLevelBlock.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -65,6 +65,7 @@
switch (bc()) {
case Bytecodes::_ldc:
case Bytecodes::_ldc_w:
+ case Bytecodes::_ldc2_w:
if (!SharkConstant::for_ldc(iter())->is_loaded()) {
set_trap(
Deoptimization::make_trap_request(
@@ -109,7 +110,8 @@
case Bytecodes::_invokespecial:
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
- method = iter()->get_method(will_link);
+ ciSignature* sig;
+ method = iter()->get_method(will_link, &sig);
assert(will_link, "typeflow responsibility");
if (!method->holder()->is_linked()) {
@@ -562,12 +564,12 @@
Value *exception_klass = builder()->CreateValueOfStructEntry(
xstack(0)->jobject_value(),
in_ByteSize(oopDesc::klass_offset_in_bytes()),
- SharkType::oop_type(),
+ SharkType::klass_type(),
"exception_klass");
for (int i = 0; i < num_options; i++) {
Value *check_klass =
- builder()->CreateInlineOop(exc_handler(i)->catch_klass());
+ builder()->CreateInlineMetadata(exc_handler(i)->catch_klass(), SharkType::klass_type());
BasicBlock *not_exact = function()->CreateBlock("not_exact");
BasicBlock *not_subtype = function()->CreateBlock("not_subtype");
@@ -823,7 +825,7 @@
builder()->CreateArrayAddress(
array->jarray_value(), basic_type, index->jint_value()));
- const Type *stack_type = SharkType::to_stackType(basic_type);
+ Type *stack_type = SharkType::to_stackType(basic_type);
if (value->getType() != stack_type)
value = builder()->CreateIntCast(value, stack_type, basic_type != T_CHAR);
@@ -910,7 +912,7 @@
ShouldNotReachHere();
}
- const Type *array_type = SharkType::to_arrayType(basic_type);
+ Type *array_type = SharkType::to_arrayType(basic_type);
if (value->getType() != array_type)
value = builder()->CreateIntCast(value, array_type, basic_type != T_CHAR);
@@ -1102,9 +1104,9 @@
Value *SharkTopLevelBlock::get_direct_callee(ciMethod* method) {
return builder()->CreateBitCast(
- builder()->CreateInlineOop(method),
- SharkType::Method*_type(),
- "callee");
+ builder()->CreateInlineMetadata(method, SharkType::Method_type()),
+ SharkType::Method_type(),
+ "callee");
}
Value *SharkTopLevelBlock::get_virtual_callee(SharkValue* receiver,
@@ -1118,7 +1120,7 @@
return builder()->CreateLoad(
builder()->CreateArrayAddress(
klass,
- SharkType::Method*_type(),
+ SharkType::Method_type(),
vtableEntry::size() * wordSize,
in_ByteSize(InstanceKlass::vtable_start_offset() * wordSize),
LLVMValue::intptr_constant(vtable_index)),
@@ -1136,7 +1138,7 @@
// Locate the receiver's itable
Value *object_klass = builder()->CreateValueOfStructEntry(
receiver->jobject_value(), in_ByteSize(oopDesc::klass_offset_in_bytes()),
- SharkType::oop_type(),
+ SharkType::klass_type(),
"object_klass");
Value *vtable_start = builder()->CreateAdd(
@@ -1169,12 +1171,12 @@
}
// Locate this interface's entry in the table
- Value *iklass = builder()->CreateInlineOop(method->holder());
+ Value *iklass = builder()->CreateInlineMetadata(method->holder(), SharkType::klass_type());
BasicBlock *loop_entry = builder()->GetInsertBlock();
builder()->CreateBr(loop);
builder()->SetInsertPoint(loop);
PHINode *itable_entry_addr = builder()->CreatePHI(
- SharkType::intptr_type(), "itable_entry_addr");
+ SharkType::intptr_type(), 0, "itable_entry_addr");
itable_entry_addr->addIncoming(itable_start, loop_entry);
Value *itable_entry = builder()->CreateIntToPtr(
@@ -1183,11 +1185,11 @@
Value *itable_iklass = builder()->CreateValueOfStructEntry(
itable_entry,
in_ByteSize(itableOffsetEntry::interface_offset_in_bytes()),
- SharkType::oop_type(),
+ SharkType::klass_type(),
"itable_iklass");
builder()->CreateCondBr(
- builder()->CreateICmpEQ(itable_iklass, LLVMValue::null()),
+ builder()->CreateICmpEQ(itable_iklass, LLVMValue::nullKlass()),
got_null, not_null);
// A null entry means that the class doesn't implement the
@@ -1231,7 +1233,7 @@
method->itable_index() * itableMethodEntry::size() * wordSize)),
LLVMValue::intptr_constant(
itableMethodEntry::method_offset_in_bytes())),
- PointerType::getUnqual(SharkType::Method*_type())),
+ PointerType::getUnqual(SharkType::Method_type())),
"callee");
}
@@ -1243,7 +1245,9 @@
// Find the method being called
bool will_link;
- ciMethod *dest_method = iter()->get_method(will_link);
+ ciSignature* sig;
+ ciMethod *dest_method = iter()->get_method(will_link, &sig);
+
assert(will_link, "typeflow responsibility");
assert(dest_method->is_static() == is_static, "must match bc");
@@ -1259,10 +1263,17 @@
assert(holder_klass->is_interface() ||
holder_klass->super() == NULL ||
!is_interface, "must match bc");
+
+ bool is_forced_virtual = is_interface && holder_klass == java_lang_Object_klass();
+
ciKlass *holder = iter()->get_declared_method_holder();
ciInstanceKlass *klass =
ciEnv::get_instance_klass_for_declared_method_holder(holder);
+ if (is_forced_virtual) {
+ klass = java_lang_Object_klass();
+ }
+
// Find the receiver in the stack. We do this before
// trying to inline because the inliner can only use
// zero-checked values, not being able to perform the
@@ -1294,7 +1305,7 @@
// Find the method we are calling
Value *callee;
if (call_is_virtual) {
- if (is_virtual) {
+ if (is_virtual || is_forced_virtual) {
assert(klass->is_linked(), "scan_for_traps responsibility");
int vtable_index = call_method->resolve_vtable_index(
target()->holder(), klass);
@@ -1490,12 +1501,12 @@
// Get the class we're checking against
builder()->SetInsertPoint(not_null);
- Value *check_klass = builder()->CreateInlineOop(klass);
+ Value *check_klass = builder()->CreateInlineMetadata(klass, SharkType::klass_type());
// Get the class of the object being tested
Value *object_klass = builder()->CreateValueOfStructEntry(
object, in_ByteSize(oopDesc::klass_offset_in_bytes()),
- SharkType::oop_type(),
+ SharkType::klass_type(),
"object_klass");
// Perform the check
@@ -1520,7 +1531,7 @@
// First merge
builder()->SetInsertPoint(merge1);
PHINode *nonnull_result = builder()->CreatePHI(
- SharkType::jint_type(), "nonnull_result");
+ SharkType::jint_type(), 0, "nonnull_result");
nonnull_result->addIncoming(
LLVMValue::jint_constant(IC_IS_INSTANCE), is_instance);
nonnull_result->addIncoming(
@@ -1531,7 +1542,7 @@
// Second merge
builder()->SetInsertPoint(merge2);
PHINode *result = builder()->CreatePHI(
- SharkType::jint_type(), "result");
+ SharkType::jint_type(), 0, "result");
result->addIncoming(LLVMValue::jint_constant(IC_IS_NULL), null_block);
result->addIncoming(nonnull_result, nonnull_block);
@@ -1698,7 +1709,7 @@
heap_object = builder()->CreateIntToPtr(
old_top, SharkType::oop_type(), "heap_object");
- Value *check = builder()->CreateCmpxchgPtr(new_top, top_addr, old_top);
+ Value *check = builder()->CreateAtomicCmpXchg(top_addr, old_top, new_top, llvm::SequentiallyConsistent);
builder()->CreateCondBr(
builder()->CreateICmpEQ(old_top, check),
initialize, retry);
@@ -1707,7 +1718,7 @@
builder()->SetInsertPoint(initialize);
if (tlab_object) {
PHINode *phi = builder()->CreatePHI(
- SharkType::oop_type(), "fast_object");
+ SharkType::oop_type(), 0, "fast_object");
phi->addIncoming(tlab_object, got_tlab);
phi->addIncoming(heap_object, got_heap);
fast_object = phi;
@@ -1730,7 +1741,7 @@
Value *klass_addr = builder()->CreateAddressOfStructEntry(
fast_object, in_ByteSize(oopDesc::klass_offset_in_bytes()),
- PointerType::getUnqual(SharkType::oop_type()),
+ PointerType::getUnqual(SharkType::klass_type()),
"klass_addr");
// Set the mark
@@ -1744,7 +1755,7 @@
builder()->CreateStore(LLVMValue::intptr_constant(mark), mark_addr);
// Set the class
- Value *rtklass = builder()->CreateInlineOop(klass);
+ Value *rtklass = builder()->CreateInlineMetadata(klass, SharkType::klass_type());
builder()->CreateStore(rtklass, klass_addr);
got_fast = builder()->GetInsertBlock();
@@ -1767,7 +1778,7 @@
builder()->SetInsertPoint(push_object);
}
if (fast_object) {
- PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), "object");
+ PHINode *phi = builder()->CreatePHI(SharkType::oop_type(), 0, "object");
phi->addIncoming(fast_object, got_fast);
phi->addIncoming(slow_object, got_slow);
object = phi;
@@ -1849,8 +1860,9 @@
void SharkTopLevelBlock::acquire_method_lock() {
Value *lockee;
- if (target()->is_static())
+ if (target()->is_static()) {
lockee = builder()->CreateInlineOop(target()->holder()->java_mirror());
+ }
else
lockee = local(0)->jobject_value();
@@ -1898,7 +1910,7 @@
Value *lock = builder()->CreatePtrToInt(
monitor_header_addr, SharkType::intptr_type());
- Value *check = builder()->CreateCmpxchgPtr(lock, mark_addr, disp);
+ Value *check = builder()->CreateAtomicCmpXchg(mark_addr, disp, lock, llvm::Acquire);
builder()->CreateCondBr(
builder()->CreateICmpEQ(disp, check),
acquired_fast, try_recursive);
@@ -1983,7 +1995,7 @@
PointerType::getUnqual(SharkType::intptr_type()),
"mark_addr");
- Value *check = builder()->CreateCmpxchgPtr(disp, mark_addr, lock);
+ Value *check = builder()->CreateAtomicCmpXchg(mark_addr, lock, disp, llvm::Release);
builder()->CreateCondBr(
builder()->CreateICmpEQ(lock, check),
released_fast, slow_path);
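
The allocation and lock fast paths above swap the removed CreateCmpxchgInt/CreateCmpxchgPtr helpers for IRBuilder's compare-and-swap, which in this LLVM generation returns the value found at the address; comparing that with the expected value reproduces the old success test. A standalone sketch under the same assumption (cas_succeeded is an illustrative helper, not Shark code):

#include <llvm/Support/IRBuilder.h>   // moves to <llvm/IRBuilder.h> in LLVM 3.2

// CreateAtomicCmpXchg(addr, expected, desired, ordering) yields the value
// that was at 'addr'; the swap took effect iff that equals 'expected'.
static llvm::Value* cas_succeeded(llvm::IRBuilder<>& builder, llvm::Value* addr,
                                  llvm::Value* expected, llvm::Value* desired) {
  llvm::Value* found = builder.CreateAtomicCmpXchg(addr, expected, desired,
                                                   llvm::SequentiallyConsistent);
  return builder.CreateICmpEQ(expected, found, "cas_ok");
}
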
--- a/hotspot/src/share/vm/shark/sharkTopLevelBlock.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkTopLevelBlock.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -290,7 +290,7 @@
int exception_action) {
decache_for_VM_call();
stack()->CreateSetLastJavaFrame();
- llvm::CallInst *res = builder()->CreateCall(callee, args_start, args_end);
+ llvm::CallInst *res = builder()->CreateCall(callee, llvm::makeArrayRef(args_start, args_end));
stack()->CreateResetLastJavaFrame();
cache_after_VM_call();
if (exception_action & EAM_CHECK) {
--- a/hotspot/src/share/vm/shark/sharkType.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkType.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -40,82 +40,85 @@
// Basic types
public:
- static const llvm::Type* void_type() {
+ static llvm::Type* void_type() {
return context().void_type();
}
- static const llvm::IntegerType* bit_type() {
+ static llvm::IntegerType* bit_type() {
return context().bit_type();
}
- static const llvm::IntegerType* jbyte_type() {
+ static llvm::IntegerType* jbyte_type() {
return context().jbyte_type();
}
- static const llvm::IntegerType* jshort_type() {
+ static llvm::IntegerType* jshort_type() {
return context().jshort_type();
}
- static const llvm::IntegerType* jint_type() {
+ static llvm::IntegerType* jint_type() {
return context().jint_type();
}
- static const llvm::IntegerType* jlong_type() {
+ static llvm::IntegerType* jlong_type() {
return context().jlong_type();
}
- static const llvm::Type* jfloat_type() {
+ static llvm::Type* jfloat_type() {
return context().jfloat_type();
}
- static const llvm::Type* jdouble_type() {
+ static llvm::Type* jdouble_type() {
return context().jdouble_type();
}
- static const llvm::IntegerType* intptr_type() {
+ static llvm::IntegerType* intptr_type() {
return context().intptr_type();
}
// Compound types
public:
- static const llvm::PointerType* itableOffsetEntry_type() {
+ static llvm::PointerType* itableOffsetEntry_type() {
return context().itableOffsetEntry_type();
}
- static const llvm::PointerType* jniEnv_type() {
+ static llvm::PointerType* jniEnv_type() {
return context().jniEnv_type();
}
- static const llvm::PointerType* jniHandleBlock_type() {
+ static llvm::PointerType* jniHandleBlock_type() {
return context().jniHandleBlock_type();
}
- static const llvm::PointerType* klass_type() {
+ static llvm::PointerType* Metadata_type() {
+ return context().Metadata_type();
+ }
+ static llvm::PointerType* klass_type() {
return context().klass_type();
}
- static const llvm::PointerType* Method*_type() {
- return context().Method*_type();
+ static llvm::PointerType* Method_type() {
+ return context().Method_type();
}
- static const llvm::ArrayType* monitor_type() {
+ static llvm::ArrayType* monitor_type() {
return context().monitor_type();
}
- static const llvm::PointerType* oop_type() {
+ static llvm::PointerType* oop_type() {
return context().oop_type();
}
- static const llvm::PointerType* thread_type() {
+ static llvm::PointerType* thread_type() {
return context().thread_type();
}
- static const llvm::PointerType* zeroStack_type() {
+ static llvm::PointerType* zeroStack_type() {
return context().zeroStack_type();
}
- static const llvm::FunctionType* entry_point_type() {
+ static llvm::FunctionType* entry_point_type() {
return context().entry_point_type();
}
- static const llvm::FunctionType* osr_entry_point_type() {
+ static llvm::FunctionType* osr_entry_point_type() {
return context().osr_entry_point_type();
}
// Mappings
public:
- static const llvm::Type* to_stackType(BasicType type) {
+ static llvm::Type* to_stackType(BasicType type) {
return context().to_stackType(type);
}
- static const llvm::Type* to_stackType(ciType* type) {
+ static llvm::Type* to_stackType(ciType* type) {
return to_stackType(type->basic_type());
}
- static const llvm::Type* to_arrayType(BasicType type) {
+ static llvm::Type* to_arrayType(BasicType type) {
return context().to_arrayType(type);
}
- static const llvm::Type* to_arrayType(ciType* type) {
+ static llvm::Type* to_arrayType(ciType* type) {
return to_arrayType(type->basic_type());
}
};
--- a/hotspot/src/share/vm/shark/sharkValue.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/sharkValue.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -233,7 +233,7 @@
assert(type() == other->type(), "should be");
assert(zero_checked() == other->zero_checked(), "should be");
- PHINode *phi = builder->CreatePHI(SharkType::to_stackType(type()), name);
+ PHINode *phi = builder->CreatePHI(SharkType::to_stackType(type()), 0, name);
phi->addIncoming(this->generic_value(), this_block);
phi->addIncoming(other->generic_value(), other_block);
return SharkValue::create_generic(type(), phi, zero_checked());
--- a/hotspot/src/share/vm/shark/shark_globals.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/shark/shark_globals.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -40,6 +40,12 @@
product(intx, SharkMaxInlineSize, 32, \
"Maximum bytecode size of methods to inline when using Shark") \
\
+ product(bool, EliminateNestedLocks, true, \
+ "Eliminate nested locks of the same object when possible") \
+ \
+ product(ccstr, SharkOptimizationLevel, "Default", \
+          "The optimization level passed to LLVM, possible values: None, Less, Default and Aggressive") \
+ \
/* compiler debugging */ \
develop(ccstr, SharkPrintTypeflowOf, NULL, \
"Print the typeflow of the specified method") \
@@ -58,6 +64,10 @@
\
diagnostic(bool, SharkPerformanceWarnings, false, \
"Warn about things that could be made faster") \
+ \
+ develop(ccstr, SharkVerifyFunction, NULL, \
+          "Run the LLVM verifier over the IR of the specified method") \
+
SHARK_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/utilities/array.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/array.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,19 +24,8 @@
#include "precompiled.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/array.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef ASSERT
--- a/hotspot/src/share/vm/utilities/debug.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/debug.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -44,6 +44,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "services/heapDumper.hpp"
#include "utilities/defaultStream.hpp"
@@ -52,19 +53,15 @@
#include "utilities/vmError.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
-# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
-# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
-# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
-# include "thread_bsd.inline.hpp"
#endif
#ifndef ASSERT
--- a/hotspot/src/share/vm/utilities/events.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/events.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,22 +26,11 @@
#include "memory/allocation.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
EventLog* Events::_logs = NULL;
--- a/hotspot/src/share/vm/utilities/events.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/events.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -135,11 +135,11 @@
};
// A simple wrapper class for fixed size text messages.
-class StringLogMessage : public FormatBuffer<132> {
+class StringLogMessage : public FormatBuffer<256> {
public:
// Wrap this buffer in a stringStream.
stringStream stream() {
- return stringStream(_buf, sizeof(_buf));
+ return stringStream(_buf, size());
}
};
--- a/hotspot/src/share/vm/utilities/exceptions.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -30,21 +30,10 @@
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/events.hpp"
#include "utilities/exceptions.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// Implementation of ThreadShadow
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -1280,4 +1280,12 @@
#define ARRAY_SIZE(array) (sizeof(array)/sizeof((array)[0]))
+// Dereference vptr
+// All C++ compilers that we know of have the vtbl pointer in the first
+// word. If there are exceptions, this function needs to be made compiler
+// specific.
+static inline void* dereference_vptr(void* addr) {
+ return *(void**)addr;
+}
+
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP
--- a/hotspot/src/share/vm/utilities/growableArray.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/growableArray.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -24,19 +24,9 @@
#include "precompiled.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/growableArray.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+
#ifdef ASSERT
void GenericGrowableArray::set_nesting() {
if (on_stack()) {
--- a/hotspot/src/share/vm/utilities/preserveException.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/preserveException.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -26,18 +26,7 @@
#define SHARE_VM_UTILITIES_PRESERVEEXCEPTION_HPP
#include "runtime/handles.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
+#include "runtime/thread.inline.hpp"
// This file provides more support for exception handling; see also exceptions.hpp
class PreserveExceptionMark {
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/taskqueue.cpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,21 +25,10 @@
#include "precompiled.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
+#include "runtime/thread.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
#ifdef TRACESPINNING
uint ParallelTaskTerminator::_total_yields = 0;
--- a/hotspot/src/share/vm/utilities/workgroup.hpp Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/src/share/vm/utilities/workgroup.hpp Mon Dec 17 08:30:06 2012 -0500
@@ -25,19 +25,8 @@
#ifndef SHARE_VM_UTILITIES_WORKGROUP_HPP
#define SHARE_VM_UTILITIES_WORKGROUP_HPP
+#include "runtime/thread.inline.hpp"
#include "utilities/taskqueue.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "thread_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "thread_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "thread_bsd.inline.hpp"
-#endif
// Task class hierarchy:
// AbstractGangTask
--- a/hotspot/test/compiler/6865265/StackOverflowBug.java Mon Dec 17 08:28:27 2012 -0500
+++ b/hotspot/test/compiler/6865265/StackOverflowBug.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,7 @@
* @summary JVM crashes with "missing exception handler" error
* @author volker.simonis@sap.com
*
- * @run main/othervm -XX:CompileThreshold=100 -Xbatch -Xss224k StackOverflowBug
+ * @run main/othervm -XX:CompileThreshold=100 -Xbatch -Xss248k StackOverflowBug
*/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/8003720/Asmator.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import jdk.internal.org.objectweb.asm.*;
+
+class Asmator {
+ static byte[] fixup(byte[] buf) throws java.io.IOException {
+ ClassReader cr = new ClassReader(buf);
+ ClassWriter cw = new ClassWriter(0);
+ ClassVisitor cv = new ClassVisitor(Opcodes.ASM4, cw) {
+ public MethodVisitor visitMethod(
+ final int access,
+ final String name,
+ final String desc,
+ final String signature,
+ final String[] exceptions)
+ {
+ MethodVisitor mv = super.visitMethod(access,
+ name,
+ desc,
+ signature,
+ exceptions);
+ if (mv == null) return null;
+ if (name.equals("callme")) {
+ // make receiver go dead!
+ mv.visitInsn(Opcodes.ACONST_NULL);
+ mv.visitVarInsn(Opcodes.ASTORE, 0);
+ }
+ return mv;
+ }
+ };
+ cr.accept(cv, 0);
+ return cw.toByteArray();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/8003720/Test8003720.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8003720
+ * @summary Method in interpreter stack frame can be deallocated
+ * @compile -XDignore.symbol.file -source 1.7 -target 1.7 Victim.java
+ * @run main/othervm -Xverify:all -Xint Test8003720
+ */
+
+// Attempts to make the JVM unload a class while still executing one of its methods.
+public class Test8003720 {
+ final static String VICTIM_CLASS_NAME = "Victim";
+ final static boolean QUIET = true;
+ final static long DURATION = 30000;
+
+ public interface CallMe { void callme(); }
+
+ public static void main(String... av) throws Throwable {
+ newVictimClassLoader();
+ System.gc();
+
+ newVictimClass();
+ System.gc();
+
+ newVictimInstance();
+ System.gc();
+
+ ((CallMe)newVictimInstance()).callme();
+ }
+
+ public static Object newVictimInstance() throws Throwable {
+ return newVictimClass().newInstance();
+ }
+
+ public static Class<?> newVictimClass() throws Throwable {
+ return Class.forName(VICTIM_CLASS_NAME, true, new VictimClassLoader());
+ }
+
+ public static ClassLoader newVictimClassLoader() throws Throwable {
+ return new VictimClassLoader();
+ }
+
+ public static void println(String line) {
+ if (!QUIET) {
+ System.out.println(line);
+ }
+ }
+}
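For context, a minimal sketch (not part of the changeset, and assuming it is compiled in the same default package as the test classes) of the unloading condition this test exercises: once the only strong references to the loader and the class are dropped, a GC is free to unload the class. The bug under test is that an interpreter frame still executing Victim.callme() did not keep the method's metadata alive across such an unload.

    import java.lang.ref.WeakReference;

    // Illustrative only: observe whether Victim can be unloaded once its loader
    // and class are no longer strongly reachable. Uses the VictimClassLoader added
    // by this changeset; the WeakReference bookkeeping is hypothetical.
    class UnloadSketch {
        public static void main(String[] args) throws Exception {
            ClassLoader loader = new VictimClassLoader();
            WeakReference<Class<?>> ref =
                    new WeakReference<>(Class.forName("Victim", true, loader));
            loader = null;          // drop the loader reference
            System.gc();            // the class may now be unloaded
            System.out.println("Victim still loaded: " + (ref.get() != null));
        }
    }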
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/8003720/Victim.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+public class Victim implements Test8003720.CallMe {
+ public void callme() {
+ // note: Victim.this is dead here
+ Test8003720.println("executing in loader=" + Victim.class.getClassLoader());
+
+ long now = System.currentTimeMillis();
+
+ while ((System.currentTimeMillis() - now) < Test8003720.DURATION) {
+ long count = VictimClassLoader.counter++;
+ if (count % 1000000 == 0) System.gc();
+ if (count % 16180000 == 0) blurb();
+ new Object[1].clone();
+ }
+ }
+ static void blurb() {
+ Test8003720.println("count=" + VictimClassLoader.counter);
+ }
+ static {
+ blather();
+ }
+ static void blather() {
+ new java.util.ArrayList<Object>(1000000);
+ Class<Victim> c = Victim.class;
+ Test8003720.println("initializing " + c + "#" + System.identityHashCode(c) + " in " + c.getClassLoader());
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/8003720/VictimClassLoader.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+public class VictimClassLoader extends ClassLoader {
+ public static long counter = 0;
+
+ private int which = (int) ++counter;
+
+ protected VictimClassLoader() {
+ super(VictimClassLoader.class.getClassLoader());
+ }
+
+ protected Class loadClass(String name, boolean resolve) throws ClassNotFoundException {
+ Class c;
+ if (!name.endsWith("Victim")) {
+ c = super.loadClass(name, resolve);
+ return c;
+ }
+
+ c = findLoadedClass(name);
+ if (c != null) {
+ return c;
+ }
+
+ byte[] buf = readClassFile(name);
+ c = defineClass(name, buf, 0, buf.length);
+ resolveClass(c);
+
+ if (c.getClassLoader() != this) {
+ throw new AssertionError();
+ }
+
+ Test8003720.println("loaded " + c + "#" + System.identityHashCode(c) + " in " + c.getClassLoader());
+ return c;
+ }
+
+ static byte[] readClassFile(String name) {
+ try {
+ String rname = name.substring(name.lastIndexOf('.') + 1) + ".class";
+ java.net.URL url = VictimClassLoader.class.getResource(rname);
+ Test8003720.println("found " + rname + " = " + url);
+
+ java.net.URLConnection connection = url.openConnection();
+ int contentLength = connection.getContentLength();
+ byte[] buf = readFully(connection.getInputStream(), contentLength);
+
+ return Asmator.fixup(buf);
+ } catch (java.io.IOException ex) {
+ throw new Error(ex);
+ }
+ }
+
+ static byte[] readFully(java.io.InputStream in, int len) throws java.io.IOException {
+ // Warning here:
+ return sun.misc.IOUtils.readFully(in, len, true);
+ }
+
+ public void finalize() {
+ Test8003720.println("Goodbye from " + this);
+ }
+
+ public String toString() {
+ return "VictimClassLoader#" + which;
+ }
+}
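The readFully helper above leans on the internal sun.misc.IOUtils API (hence the "Warning here" comment). A minimal sketch of an equivalent for the non-negative-length case using only public JDK I/O, should a reader want to avoid the internal call (the class name is hypothetical):

    import java.io.IOException;
    import java.io.InputStream;

    // Hypothetical stand-in for sun.misc.IOUtils.readFully(in, len, true):
    // read exactly len bytes or fail.
    class ReadFullySketch {
        static byte[] readFully(InputStream in, int len) throws IOException {
            byte[] buf = new byte[len];
            int off = 0;
            while (off < len) {
                int n = in.read(buf, off, len - off);  // may return fewer bytes than requested
                if (n < 0) {
                    throw new IOException("unexpected end of stream");
                }
                off += n;
            }
            return buf;
        }
    }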
--- a/jaxp/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/jaxp/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -187,3 +187,5 @@
192d8a244bc36427757866e9fb3a08938c0e674c jdk8-b63
27ab79568c34abf80958d5fa8c04fd1740d243da jdk8-b64
5cf3c69a93d6d088a1cdfa28031d4f0f9438c0de jdk8-b65
+e6af1ad464e3d9b1154b9f9ed9a5373b97d129fc jdk8-b66
+83df3493ca3cf0be077f1d0dd90119456f266f54 jdk8-b67
--- a/jaxws/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/jaxws/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -187,3 +187,5 @@
86989f702267debe16d13720d5ae7ae9839796f4 jdk8-b63
5ded18a14bcc80b2a253f2b84da0073a0ecac665 jdk8-b64
fbe54291c9d337ea4dfef4d846f1d9a22f76249c jdk8-b65
+3eb7f11cb4e000555c1b6f0f1a10fe2919633c8e jdk8-b66
+eb06aa51dfc225614dba2d89ae7ca6cedddff982 jdk8-b67
--- a/jdk/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -188,3 +188,5 @@
26dbd73fb7662a29b3e47179fdc88a0bfa4e231e jdk8-b64
130d3a54d28becaac0846137256c2684adb34c33 jdk8-b65
4d337fae2250135729ee9ed2bf8baf3c60da5d6d jdk8-b66
+ce9b02a3a17edd1983201002cfa0f364e4ab7524 jdk8-b67
+53fb43e4d614c92310e1fb00ec41d1960fd9facf jdk8-b68
--- a/jdk/make/com/sun/security/Makefile Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/com/sun/security/Makefile Mon Dec 17 08:30:06 2012 -0500
@@ -35,7 +35,7 @@
include $(BUILDDIR)/common/Defs.gmk
SUBDIRS = auth
-SUBDIRS_misc = jgss sasl auth/module
+SUBDIRS_misc = jgss sasl auth/module ntlm
include $(BUILDDIR)/common/Subdirs.gmk
all build clean clobber::
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/make/com/sun/security/ntlm/Makefile Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+BUILDDIR = ../../../..
+PACKAGE = com.sun.security.ntlm
+PRODUCT = sun
+include $(BUILDDIR)/common/Defs.gmk
+
+#
+# Files
+#
+AUTO_FILES_JAVA_DIRS = com/sun/security/ntlm
+
+#
+# Rules
+#
+include $(BUILDDIR)/common/Classes.gmk
--- a/jdk/make/sun/security/Makefile Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/sun/security/Makefile Mon Dec 17 08:30:06 2012 -0500
@@ -38,10 +38,12 @@
SUBDIRS_MAKEFLAGS += JAVAC_WARNINGS_FATAL=true
include $(BUILDDIR)/common/Defs.gmk
-# build sun/security/jgss/wrapper on non-windows platform
+# build sun/security/jgss/wrapper on non-windows non-macosx platforms
JGSS_WRAPPER =
ifneq ($(PLATFORM), windows)
- JGSS_WRAPPER = jgss/wrapper
+ ifneq ($(PLATFORM), macosx)
+ JGSS_WRAPPER = jgss/wrapper
+ endif
endif
# Build PKCS#11 on all platforms
--- a/jdk/make/tools/src/build/tools/cldrconverter/Bundle.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/tools/src/build/tools/cldrconverter/Bundle.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,6 +29,7 @@
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -86,7 +87,23 @@
private final static String[] ERA_KEYS = {
"long.Eras",
"Eras",
- "short.Eras"
+ "narrow.Eras"
+ };
+
+ // Keys for individual time zone names
+ private final static String TZ_GEN_LONG_KEY = "timezone.displayname.generic.long";
+ private final static String TZ_GEN_SHORT_KEY = "timezone.displayname.generic.short";
+ private final static String TZ_STD_LONG_KEY = "timezone.displayname.standard.long";
+ private final static String TZ_STD_SHORT_KEY = "timezone.displayname.standard.short";
+ private final static String TZ_DST_LONG_KEY = "timezone.displayname.daylight.long";
+ private final static String TZ_DST_SHORT_KEY = "timezone.displayname.daylight.short";
+ private final static String[] ZONE_NAME_KEYS = {
+ TZ_STD_LONG_KEY,
+ TZ_STD_SHORT_KEY,
+ TZ_DST_LONG_KEY,
+ TZ_DST_SHORT_KEY,
+ TZ_GEN_LONG_KEY,
+ TZ_GEN_SHORT_KEY
};
private final String id;
@@ -98,6 +115,7 @@
return bundles.get(id);
}
+ @SuppressWarnings("ConvertToStringSwitch")
Bundle(String id, String cldrPath, String bundles, String currencies) {
this.id = id;
this.cldrPath = cldrPath;
@@ -242,9 +260,12 @@
// handle multiple inheritance for month and day names
handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "MonthNames");
handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "MonthAbbreviations");
+ handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "MonthNarrows");
handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "DayNames");
handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "DayAbbreviations");
+ handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "DayNarrows");
handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "AmPmMarkers");
+ handleMultipleInheritance(myMap, parentsMap, calendarPrefix + "narrow.AmPmMarkers");
adjustEraNames(myMap, calendarType);
@@ -253,6 +274,99 @@
handleDateTimeFormatPatterns(DATETIME_PATTERN_KEYS, myMap, parentsMap, calendarType, "DateTimePatterns");
}
+ // If myMap has any empty timezone or metazone names, weed them out.
+ // Fill in any missing abbreviations if locale is "en".
+ for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
+ String key = it.next();
+ if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
+ || key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
+ @SuppressWarnings("unchecked")
+ Map<String, String> nameMap = (Map<String, String>) myMap.get(key);
+ if (nameMap.isEmpty()) {
+ // Some zones have only exemplarCity, which becomes empty.
+ // Remove those from the map.
+ it.remove();
+ continue;
+ }
+
+ if (id.startsWith("en")) {
+ fillInAbbrs(key, nameMap);
+ }
+ }
+ }
+ for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
+ String key = it.next();
+ if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
+ || key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
+ @SuppressWarnings("unchecked")
+ Map<String, String> nameMap = (Map<String, String>) myMap.get(key);
+ // Convert key/value pairs to an array.
+ String[] names = new String[ZONE_NAME_KEYS.length];
+ int ix = 0;
+ for (String nameKey : ZONE_NAME_KEYS) {
+ String name = nameMap.get(nameKey);
+ if (name == null) {
+ @SuppressWarnings("unchecked")
+ Map<String, String> parentNames = (Map<String, String>) parentsMap.get(key);
+ if (parentNames != null) {
+ name = parentNames.get(nameKey);
+ }
+ }
+ names[ix++] = name;
+ }
+ if (hasNulls(names)) {
+ String metaKey = toMetaZoneKey(key);
+ if (metaKey != null) {
+ Object obj = myMap.get(metaKey);
+ if (obj instanceof String[]) {
+ String[] metaNames = (String[]) obj;
+ for (int i = 0; i < names.length; i++) {
+ if (names[i] == null) {
+ names[i] = metaNames[i];
+ }
+ }
+ } else if (obj instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, String> m = (Map<String, String>) obj;
+ for (int i = 0; i < names.length; i++) {
+ if (names[i] == null) {
+ names[i] = m.get(ZONE_NAME_KEYS[i]);
+ }
+ }
+ }
+ }
+ // If there are still any nulls, try filling them in from the en data.
+ if (hasNulls(names) && !id.equals("en")) {
+ @SuppressWarnings("unchecked")
+ String[] enNames = (String[]) Bundle.getBundle("en").getTargetMap().get(key);
+ if (enNames == null) {
+ if (metaKey != null) {
+ @SuppressWarnings("unchecked")
+ String[] metaNames = (String[]) Bundle.getBundle("en").getTargetMap().get(metaKey);
+ enNames = metaNames;
+ }
+ }
+ if (enNames != null) {
+ for (int i = 0; i < names.length; i++) {
+ if (names[i] == null) {
+ names[i] = enNames[i];
+ }
+ }
+ }
+ // If there are still nulls, give up and drop the names.
+ if (hasNulls(names)) {
+ names = null;
+ }
+ }
+ }
+ // replace the Map with the array
+ if (names != null) {
+ myMap.put(key, names);
+ } else {
+ it.remove();
+ }
+ }
+ }
return myMap;
}
@@ -352,20 +466,10 @@
realKeys[index] = realKey;
eraNames[index++] = value;
}
- if (eraNames[0] != null) {
- if (eraNames[1] != null) {
- if (eraNames[2] == null) {
- // Eras -> short.Eras
- // long.Eras -> Eras
- map.put(realKeys[2], map.get(realKeys[1]));
- map.put(realKeys[1], map.get(realKeys[0]));
- }
- } else {
- // long.Eras -> Eras
- map.put(realKeys[1], map.get(realKeys[0]));
+ for (int i = 0; i < eraNames.length; i++) {
+ if (eraNames[i] == null) {
+ map.put(realKeys[i], null);
}
- // remove long.Eras
- map.remove(realKeys[0]);
}
}
@@ -473,6 +577,86 @@
return jrePattern.toString();
}
+ private String toMetaZoneKey(String tzKey) {
+ if (tzKey.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)) {
+ String tz = tzKey.substring(CLDRConverter.TIMEZONE_ID_PREFIX.length());
+ String meta = CLDRConverter.handlerMetaZones.get(tz);
+ if (meta != null) {
+ return CLDRConverter.METAZONE_ID_PREFIX + meta;
+ }
+ }
+ return null;
+ }
+
+ private void fillInAbbrs(String key, Map<String, String> map) {
+ fillInAbbrs(TZ_STD_LONG_KEY, TZ_STD_SHORT_KEY, map);
+ fillInAbbrs(TZ_DST_LONG_KEY, TZ_DST_SHORT_KEY, map);
+ fillInAbbrs(TZ_GEN_LONG_KEY, TZ_GEN_SHORT_KEY, map);
+
+ // If the standard name is "Standard Time" and the daylight name is "Summer Time",
+ // replace the standard name with the generic name to avoid using
+ // the same abbreviation, except for the Australian time zone names.
+ String std = map.get(TZ_STD_SHORT_KEY);
+ String dst = map.get(TZ_DST_SHORT_KEY);
+ String gen = map.get(TZ_GEN_SHORT_KEY);
+ if (std != null) {
+ if (dst == null) {
+ // if dst is null, create the daylight long and short names from the standard
+ // long name ("Something Standard Time" to "Something Daylight Time",
+ // or "Something Time" to "Something Summer Time")
+ String name = map.get(TZ_STD_LONG_KEY);
+ if (name != null) {
+ if (name.contains("Standard Time")) {
+ name = name.replace("Standard Time", "Daylight Time");
+ } else if (name.endsWith("Mean Time")) {
+ name = name.replace("Mean Time", "Summer Time");
+ } else if (name.endsWith(" Time")) {
+ name = name.replace(" Time", " Summer Time");
+ }
+ map.put(TZ_DST_LONG_KEY, name);
+ fillInAbbrs(TZ_DST_LONG_KEY, TZ_DST_SHORT_KEY, map);
+ }
+ }
+ if (gen == null) {
+ String name = map.get(TZ_STD_LONG_KEY);
+ if (name != null) {
+ if (name.endsWith("Standard Time")) {
+ name = name.replace("Standard Time", "Time");
+ } else if (name.endsWith("Mean Time")) {
+ name = name.replace("Mean Time", "Time");
+ }
+ map.put(TZ_GEN_LONG_KEY, name);
+ fillInAbbrs(TZ_GEN_LONG_KEY, TZ_GEN_SHORT_KEY, map);
+ }
+ }
+ }
+ }
+
+ private void fillInAbbrs(String longKey, String shortKey, Map<String, String> map) {
+ String abbr = map.get(shortKey);
+ if (abbr == null) {
+ String name = map.get(longKey);
+ if (name != null) {
+ abbr = toAbbr(name);
+ if (abbr != null) {
+ map.put(shortKey, abbr);
+ }
+ }
+ }
+ }
+
+ private String toAbbr(String name) {
+ String[] substrs = name.split("\\s+");
+ StringBuilder sb = new StringBuilder();
+ for (String s : substrs) {
+ char c = s.charAt(0);
+ if (c >= 'A' && c <= 'Z') {
+ sb.append(c);
+ }
+ }
+ return sb.length() > 0 ? sb.toString() : null;
+ }
+
private void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'G':
@@ -539,4 +723,13 @@
sb.append(c);
}
}
+
+ private static boolean hasNulls(Object[] array) {
+ for (int i = 0; i < array.length; i++) {
+ if (array[i] == null) {
+ return true;
+ }
+ }
+ return false;
+ }
}
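The abbreviation fallback added above is simply the concatenation of the leading capitals of each word in the long name. A small, self-contained sketch of that rule with illustrative inputs (the zone names below are examples, not CLDR data):

    // Mirrors Bundle.toAbbr: keep the initial capital of each whitespace-separated
    // word; return null when the name yields no capitals.
    class AbbrSketch {
        static String toAbbr(String name) {
            StringBuilder sb = new StringBuilder();
            for (String s : name.split("\\s+")) {
                char c = s.charAt(0);
                if (c >= 'A' && c <= 'Z') {
                    sb.append(c);
                }
            }
            return sb.length() > 0 ? sb.toString() : null;
        }

        public static void main(String[] args) {
            System.out.println(toAbbr("Pacific Standard Time"));   // PST
            System.out.println(toAbbr("Greenwich Mean Time"));     // GMT
            System.out.println(toAbbr("heure normale d'Europe"));  // null (no leading capitals)
        }
    }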
--- a/jdk/make/tools/src/build/tools/cldrconverter/BundleGenerator.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/tools/src/build/tools/cldrconverter/BundleGenerator.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,8 +30,27 @@
import java.util.SortedSet;
public interface BundleGenerator {
+ static enum BundleType {
+ PLAIN("java.util.ListResourceBundle"),
+ OPEN("sun.util.resources.OpenListResourceBundle"),
+ TIMEZONE("sun.util.resources.TimeZoneNamesBundle");
+
+ private final String pathName, className;
+ private BundleType(String name) {
+ pathName = name;
+ int x = name.lastIndexOf('.');
+ className = name.substring(x + 1);
+ }
+ String getPathName() {
+ return pathName;
+ }
+ String getClassName() {
+ return className;
+ }
+ };
+
public void generateBundle(String packageName, String baseName, String localeID,
- boolean useJava, Map<String, ?> map, boolean open) throws IOException;
+ boolean useJava, Map<String, ?> map, BundleType type) throws IOException;
public void generateMetaInfo(Map<String, SortedSet<String>> metaInfo) throws IOException;
}
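The new BundleType enum carries both the fully qualified superclass name and its simple name, which the generator later uses to emit each bundle's import and class declaration. A rough sketch of what that header looks like for BundleType.TIMEZONE (the base name and locale below are illustrative):

    // Shows how getPathName()/getClassName() are derived from one string and
    // how they feed the generated class header.
    class HeaderSketch {
        public static void main(String[] args) {
            String pathName = "sun.util.resources.TimeZoneNamesBundle";           // getPathName()
            String className = pathName.substring(pathName.lastIndexOf('.') + 1); // getClassName()
            String baseName = "TimeZoneNames";   // illustrative
            String localeID = "en";              // illustrative
            System.out.printf("import %s;%n%n", pathName);
            System.out.printf("public class %s%s extends %s {%n",
                    baseName, "root".equals(localeID) ? "" : "_" + localeID, className);
        }
    }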
--- a/jdk/make/tools/src/build/tools/cldrconverter/CLDRConverter.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/tools/src/build/tools/cldrconverter/CLDRConverter.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,7 @@
package build.tools.cldrconverter;
+import build.tools.cldrconverter.BundleGenerator.BundleType;
import java.io.File;
import java.nio.file.DirectoryStream;
import java.nio.file.FileSystems;
@@ -58,9 +59,8 @@
static final String CURRENCY_SYMBOL_PREFIX = "currency.symbol.";
static final String CURRENCY_NAME_PREFIX = "currency.displayname.";
static final String TIMEZONE_ID_PREFIX = "timezone.id.";
- static final String TIMEZONE_NAME_PREFIX = "timezone.displayname.";
+ static final String ZONE_NAME_PREFIX = "timezone.displayname.";
static final String METAZONE_ID_PREFIX = "metazone.id.";
- static final String METAZONE_NAME_PREFIX = "metazone.displayname.";
private static SupplementDataParseHandler handlerSuppl;
static NumberingSystemsParseHandler handlerNumbering;
@@ -236,7 +236,14 @@
if (sb.indexOf("root") == -1) {
sb.append("root");
}
- retList.add(new Bundle(id, sb.toString(), null, null));
+ Bundle b = new Bundle(id, sb.toString(), null, null);
+ // Insert the bundle for en at the top so that it will get
+ // processed first.
+ if ("en".equals(id)) {
+ retList.add(0, b);
+ } else {
+ retList.add(b);
+ }
}
}
}
@@ -312,6 +319,7 @@
Map<String, SortedSet<String>> metaInfo = new HashMap<>();
metaInfo.put("LocaleNames", new TreeSet<String>());
metaInfo.put("CurrencyNames", new TreeSet<String>());
+ metaInfo.put("TimeZoneNames", new TreeSet<String>());
metaInfo.put("CalendarData", new TreeSet<String>());
metaInfo.put("FormatData", new TreeSet<String>());
@@ -348,24 +356,28 @@
Map<String, Object> localeNamesMap = extractLocaleNames(targetMap, bundle.getID());
if (!localeNamesMap.isEmpty() || bundle.isRoot()) {
metaInfo.get("LocaleNames").add(toLanguageTag(bundle.getID()));
- bundleGenerator.generateBundle("util", "LocaleNames", bundle.getID(), true, localeNamesMap, true);
+ bundleGenerator.generateBundle("util", "LocaleNames", bundle.getID(), true, localeNamesMap, BundleType.OPEN);
}
}
if (bundleTypes.contains(Bundle.Type.CURRENCYNAMES)) {
Map<String, Object> currencyNamesMap = extractCurrencyNames(targetMap, bundle.getID(), bundle.getCurrencies());
if (!currencyNamesMap.isEmpty() || bundle.isRoot()) {
metaInfo.get("CurrencyNames").add(toLanguageTag(bundle.getID()));
- bundleGenerator.generateBundle("util", "CurrencyNames", bundle.getID(), true, currencyNamesMap, true);
+ bundleGenerator.generateBundle("util", "CurrencyNames", bundle.getID(), true, currencyNamesMap, BundleType.OPEN);
}
}
if (bundleTypes.contains(Bundle.Type.TIMEZONENAMES)) {
Map<String, Object> zoneNamesMap = extractZoneNames(targetMap, bundle.getID());
+ if (!zoneNamesMap.isEmpty() || bundle.isRoot()) {
+ metaInfo.get("TimeZoneNames").add(toLanguageTag(bundle.getID()));
+ bundleGenerator.generateBundle("util", "TimeZoneNames", bundle.getID(), true, zoneNamesMap, BundleType.TIMEZONE);
+ }
}
if (bundleTypes.contains(Bundle.Type.CALENDARDATA)) {
Map<String, Object> calendarDataMap = extractCalendarData(targetMap, bundle.getID());
if (!calendarDataMap.isEmpty() || bundle.isRoot()) {
metaInfo.get("CalendarData").add(toLanguageTag(bundle.getID()));
- bundleGenerator.generateBundle("util", "CalendarData", bundle.getID(), true, calendarDataMap, false);
+ bundleGenerator.generateBundle("util", "CalendarData", bundle.getID(), true, calendarDataMap, BundleType.PLAIN);
}
}
if (bundleTypes.contains(Bundle.Type.FORMATDATA)) {
@@ -373,9 +385,10 @@
// LocaleData.getAvailableLocales depends on having FormatData bundles around
if (!formatDataMap.isEmpty() || bundle.isRoot()) {
metaInfo.get("FormatData").add(toLanguageTag(bundle.getID()));
- bundleGenerator.generateBundle("text", "FormatData", bundle.getID(), true, formatDataMap, false);
+ bundleGenerator.generateBundle("text", "FormatData", bundle.getID(), true, formatDataMap, BundleType.PLAIN);
}
}
+
// For testing
SortedSet<String> allLocales = new TreeSet<>();
allLocales.addAll(metaInfo.get("CurrencyNames"));
@@ -431,6 +444,7 @@
private KeyComparator() {
}
+ @Override
public int compare(String o1, String o2) {
int len1 = o1.length();
int len2 = o2.length();
@@ -476,7 +490,26 @@
}
private static Map<String, Object> extractZoneNames(Map<String, Object> map, String id) {
- return null;
+ Map<String, Object> names = new HashMap<>();
+ for (String tzid : handlerMetaZones.keySet()) {
+ String tzKey = TIMEZONE_ID_PREFIX + tzid;
+ Object data = map.get(tzKey);
+ if (data instanceof String[]) {
+ names.put(tzid, data);
+ } else {
+ String meta = handlerMetaZones.get(tzid);
+ if (meta != null) {
+ String metaKey = METAZONE_ID_PREFIX + meta;
+ data = map.get(metaKey);
+ if (data instanceof String[]) {
+ // Keep the metazone prefix here.
+ names.put(metaKey, data);
+ names.put(tzid, meta);
+ }
+ }
+ }
+ }
+ return names;
}
private static Map<String, Object> extractCalendarData(Map<String, Object> map, String id) {
@@ -494,11 +527,19 @@
copyIfPresent(map, prefix + "standalone.MonthNames", formatData);
copyIfPresent(map, prefix + "MonthAbbreviations", formatData);
copyIfPresent(map, prefix + "standalone.MonthAbbreviations", formatData);
+ copyIfPresent(map, prefix + "MonthNarrow", formatData);
+ copyIfPresent(map, prefix + "standalone.MonthNarrows", formatData);
copyIfPresent(map, prefix + "DayNames", formatData);
+ copyIfPresent(map, prefix + "standalone.DayNames", formatData);
copyIfPresent(map, prefix + "DayAbbreviations", formatData);
+ copyIfPresent(map, prefix + "standalone.DayAbbreviations", formatData);
+ copyIfPresent(map, prefix + "DayNarrows", formatData);
+ copyIfPresent(map, prefix + "standalone.DayNarrows", formatData);
copyIfPresent(map, prefix + "AmPmMarkers", formatData);
+ copyIfPresent(map, prefix + "narrow.AmPmMarkers", formatData);
+ copyIfPresent(map, prefix + "long.Eras", formatData);
copyIfPresent(map, prefix + "Eras", formatData);
- copyIfPresent(map, prefix + "short.Eras", formatData);
+ copyIfPresent(map, prefix + "narrow.Eras", formatData);
copyIfPresent(map, prefix + "TimePatterns", formatData);
copyIfPresent(map, prefix + "DatePatterns", formatData);
copyIfPresent(map, prefix + "DateTimePatterns", formatData);
@@ -560,7 +601,6 @@
if (x == 0 || escapeSpace) {
outBuffer.append('\\');
}
-
outBuffer.append(' ');
break;
case '\\':
@@ -584,7 +624,7 @@
outBuffer.append('f');
break;
default:
- if (!USE_UTF8 && ((aChar < 0x0020) || (aChar > 0x007e))) {
+ if (aChar < 0x0020 || (!USE_UTF8 && aChar > 0x007e)) {
formatter.format("\\u%04x", (int)aChar);
} else {
if (specialSaveChars.indexOf(aChar) != -1) {
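extractZoneNames now returns a map whose values are either a six-element String[] (ordered as in Bundle's ZONE_NAME_KEYS: standard long/short, daylight long/short, generic long/short) or, for zones covered by a metazone, a String alias pointing at the corresponding "metazone.id." entry. A minimal sketch of that shape; the metazone id and display names are illustrative, not real CLDR output:

    import java.util.HashMap;
    import java.util.Map;

    class ZoneMapSketch {
        public static void main(String[] args) {
            Map<String, Object> names = new HashMap<>();
            names.put("metazone.id.America_Pacific", new String[] {
                "Pacific Standard Time", "PST",     // standard long, short
                "Pacific Daylight Time", "PDT",     // daylight long, short
                "Pacific Time",          "PT"       // generic long, short
            });
            // Zone covered by a metazone: the value is the metazone name, not a names array.
            names.put("America/Los_Angeles", "America_Pacific");
            System.out.println(names.get("America/Los_Angeles"));
        }
    }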
--- a/jdk/make/tools/src/build/tools/cldrconverter/LDMLParseHandler.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/tools/src/build/tools/cldrconverter/LDMLParseHandler.java Mon Dec 17 08:30:06 2012 -0500
@@ -155,6 +155,9 @@
case "abbreviated":
pushStringArrayEntry(qName, attributes, prefix + "MonthAbbreviations/" + getContainerKey(), 13);
break;
+ case "narrow":
+ pushStringArrayEntry(qName, attributes, prefix + "MonthNarrows/" + getContainerKey(), 13);
+ break;
default:
pushIgnoredContainer(qName);
break;
@@ -191,6 +194,9 @@
case "abbreviated":
pushStringArrayEntry(qName, attributes, prefix + "DayAbbreviations/" + getContainerKey(), 7);
break;
+ case "narrow":
+ pushStringArrayEntry(qName, attributes, prefix + "DayNarrows/" + getContainerKey(), 7);
+ break;
default:
pushIgnoredContainer(qName);
break;
@@ -219,25 +225,36 @@
case "dayPeriodWidth":
// for FormatData
// create string array entry for am/pm. only keeping wide
- if ("wide".equals(attributes.getValue("type"))) {
+ switch (attributes.getValue("type")) {
+ case "wide":
pushStringArrayEntry(qName, attributes, "AmPmMarkers/" + getContainerKey(), 2);
- } else {
+ break;
+ case "narrow":
+ pushStringArrayEntry(qName, attributes, "narrow.AmPmMarkers/" + getContainerKey(), 2);
+ break;
+ default:
pushIgnoredContainer(qName);
+ break;
}
break;
case "dayPeriod":
// for FormatData
// add to string array entry of AmPmMarkers element
- switch (attributes.getValue("type")) {
- case "am":
- pushStringArrayElement(qName, attributes, 0);
- break;
- case "pm":
- pushStringArrayElement(qName, attributes, 1);
- break;
- default:
+ if (attributes.getValue("alt") == null) {
+ switch (attributes.getValue("type")) {
+ case "am":
+ pushStringArrayElement(qName, attributes, 0);
+ break;
+ case "pm":
+ pushStringArrayElement(qName, attributes, 1);
+ break;
+ default:
+ pushIgnoredContainer(qName);
+ break;
+ }
+ } else {
+ // discard alt values
pushIgnoredContainer(qName);
- break;
}
break;
case "eraNames":
@@ -269,7 +286,7 @@
assert currentContainer instanceof IgnoredContainer;
pushIgnoredContainer(qName);
} else {
- String key = currentCalendarType.keyElementName() + "short.Eras";
+ String key = currentCalendarType.keyElementName() + "narrow.Eras";
pushStringArrayEntry(qName, attributes, key, currentCalendarType.getEraLength(qName));
}
break;
@@ -301,15 +318,15 @@
break;
case "zone":
{
- String zone = attributes.getValue("type");
+ String tzid = attributes.getValue("type"); // Olson tz id
zonePrefix = CLDRConverter.TIMEZONE_ID_PREFIX;
- put(zonePrefix + zone, new HashMap<String, String>());
- pushKeyContainer(qName, attributes, zone);
+ put(zonePrefix + tzid, new HashMap<String, String>());
+ pushKeyContainer(qName, attributes, tzid);
}
break;
case "metazone":
{
- String zone = attributes.getValue("type");
+ String zone = attributes.getValue("type"); // LDML meta zone id
zonePrefix = CLDRConverter.METAZONE_ID_PREFIX;
put(zonePrefix + zone, new HashMap<String, String>());
pushKeyContainer(qName, attributes, zone);
@@ -323,16 +340,12 @@
zoneNameStyle = "short";
pushContainer(qName, attributes);
break;
- case "generic": // not used in JDK
- pushIgnoredContainer(qName);
+ case "generic": // generic name
+ case "standard": // standard time name
+ case "daylight": // daylight saving (summer) time name
+ pushStringEntry(qName, attributes, CLDRConverter.ZONE_NAME_PREFIX + qName + "." + zoneNameStyle);
break;
- case "standard": // standard time
- pushStringEntry(qName, attributes, CLDRConverter.TIMEZONE_NAME_PREFIX + "standard." + zoneNameStyle);
- break;
- case "daylight":
- pushStringEntry(qName, attributes, CLDRConverter.TIMEZONE_NAME_PREFIX + "daylight." + zoneNameStyle);
- break;
- case "exemplarCity":
+ case "exemplarCity": // not used in JDK
pushIgnoredContainer(qName);
break;
@@ -530,6 +543,7 @@
case "timeZoneNames":
zonePrefix = null;
break;
+ case "generic":
case "standard":
case "daylight":
if (zonePrefix != null && (currentContainer instanceof Entry)) {
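The generic/standard/daylight entries above are keyed as ZONE_NAME_PREFIX + element + "." + style, which is exactly what the TZ_*_KEY constants added to Bundle.java expect. A tiny sketch that just prints the six resulting keys (ZONE_NAME_PREFIX is "timezone.displayname." per CLDRConverter; the styles come from the enclosing long/short containers):

    class ZoneKeySketch {
        public static void main(String[] args) {
            String prefix = "timezone.displayname.";
            for (String element : new String[] { "standard", "daylight", "generic" }) {
                for (String style : new String[] { "long", "short" }) {
                    System.out.println(prefix + element + "." + style);
                }
            }
        }
    }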
--- a/jdk/make/tools/src/build/tools/cldrconverter/MetaZonesParseHandler.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/tools/src/build/tools/cldrconverter/MetaZonesParseHandler.java Mon Dec 17 08:30:06 2012 -0500
@@ -46,8 +46,9 @@
return null;
}
+ // metaZone: ID -> metazone
+ // per locale: ID -> names, metazone -> names
@Override
- @SuppressWarnings("fallthrough")
public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
switch (qName) {
case "timezone":
--- a/jdk/make/tools/src/build/tools/cldrconverter/ResourceBundleGenerator.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/make/tools/src/build/tools/cldrconverter/ResourceBundleGenerator.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,14 +28,16 @@
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
+import java.util.Formatter;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;
import java.util.SortedSet;
class ResourceBundleGenerator implements BundleGenerator {
-
@Override
public void generateBundle(String packageName, String baseName, String localeID, boolean useJava,
- Map<String, ?> map, boolean open) throws IOException {
+ Map<String, ?> map, BundleType type) throws IOException {
String suffix = useJava ? ".java" : ".properties";
String lang = CLDRConverter.getLanguageCode(localeID);
String dirName = CLDRConverter.DESTINATION_DIR + File.separator + "sun" + File.separator
@@ -67,6 +69,28 @@
encoding = "iso-8859-1";
}
+ Formatter fmt = null;
+ if (type == BundleType.TIMEZONE) {
+ fmt = new Formatter();
+ Set<String> metaKeys = new HashSet<>();
+ for (String key : map.keySet()) {
+ if (key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
+ String meta = key.substring(CLDRConverter.METAZONE_ID_PREFIX.length());
+ String[] value;
+ value = (String[]) map.get(key);
+ fmt.format(" final String[] %s = new String[] {\n", meta);
+ for (String s : value) {
+ fmt.format(" \"%s\",\n", CLDRConverter.saveConvert(s, useJava));
+ }
+ fmt.format(" };\n");
+ metaKeys.add(key);
+ }
+ }
+ for (String key : metaKeys) {
+ map.remove(key);
+ }
+ }
+
try (PrintWriter out = new PrintWriter(file, encoding)) {
// Output copyright headers
out.println(CopyrightHeaders.getOpenJDKCopyright());
@@ -74,16 +98,15 @@
if (useJava) {
out.println("package sun." + packageName + ";\n");
- if (open) {
- out.println("import sun.util.resources.OpenListResourceBundle;\n");
- out.println("public class " + baseName + ("root".equals(localeID) ? "" : "_" + localeID) + " extends OpenListResourceBundle {");
- } else {
- out.println("import java.util.ListResourceBundle;\n");
- out.println("public class " + baseName + ("root".equals(localeID) ? "" : "_" + localeID) + " extends ListResourceBundle {");
+ out.printf("import %s;\n\n", type.getPathName());
+ out.printf("public class %s%s extends %s {\n", baseName, "root".equals(localeID) ? "" : "_" + localeID, type.getClassName());
+
+ out.println(" @Override\n" +
+ " protected final Object[][] getContents() {");
+ if (fmt != null) {
+ out.print(fmt.toString());
}
- out.println(" @Override\n" +
- " protected final Object[][] getContents() {\n" +
- " final Object[][] data = new Object[][] {");
+ out.println(" final Object[][] data = new Object[][] {");
}
for (String key : map.keySet()) {
if (useJava) {
@@ -91,7 +114,11 @@
if (value == null) {
CLDRConverter.warning("null value for " + key);
} else if (value instanceof String) {
- out.println(" { \"" + key + "\", \"" + CLDRConverter.saveConvert((String) value, useJava) + "\" },");
+ if (type == BundleType.TIMEZONE) {
+ out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+ } else {
+ out.printf(" { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+ }
} else if (value instanceof String[]) {
String[] values = (String[]) value;
out.println(" { \"" + key + "\",\n new String[] {");
--- a/jdk/makefiles/CompileDemos.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/makefiles/CompileDemos.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -194,15 +194,6 @@
##################################################################################################
-# Why do we install a demo jar into the main jre/lib/ext????????????????
-$(JDK_OUTPUTDIR)/lib/ext/zipfs.jar : $(JDK_OUTPUTDIR)/demo/nio/zipfs/zipfs.jar
- $(MKDIR) -p $(@D)
- $(CP) $< $@
-
-BUILD_DEMOS += $(JDK_OUTPUTDIR)/lib/ext/zipfs.jar
-
-##################################################################################################
-
# In the old makefiles, j2dbench was not compiled.
#$(eval $(call SetupDemo,J2DBench,java2d,/src,,j2dbench/J2DBench))
--- a/jdk/makefiles/CompileJavaClasses.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/makefiles/CompileJavaClasses.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -221,24 +221,10 @@
EXFILES+=-linux-arm.java \
-linux-ppc.java
-# TODO: Is this necessary?
ifeq ($(OPENJDK_TARGET_OS), windows)
EXFILES+=sun/nio/ch/AbstractPollSelectorImpl.java \
- sun/nio/ch/DevPollArrayWrapper.java \
- sun/nio/ch/DevPollSelectorImpl.java \
- sun/nio/ch/DevPollSelectorProvider.java \
- sun/nio/ch/InheritedChannel.java \
sun/nio/ch/PollSelectorProvider.java \
- sun/nio/ch/PollSelectorImpl.java \
- sun/nio/ch/Port.java \
- sun/nio/ch/SimpleAsynchronousFileChannelImpl.java \
- sun/nio/ch/SolarisAsynchronousChannelProvider.java \
- sun/nio/ch/SolarisEventPort.java \
- sun/nio/ch/UnixAsynchronousServerSocketChannelImpl.java \
- sun/nio/ch/UnixAsynchronousSocketChannelImpl.java
- EXFILES+=sun/net/sdp/SdpProvider.java
-else
- EXFILES+=sun/net/www/protocol/http/ntlm/NTLMAuthSequence.java
+ sun/nio/ch/SimpleAsynchronousFileChannelImpl.java
endif
# Exclude nimbus files from rt.jar
@@ -339,29 +325,14 @@
##########################################################################################
-#
-# This is an empty jar (only contains manifest) and fits poorly into framework...
-# create simple rule instead
-#
-MANAGEMENT_AGENT_JAR_DEPS := $(JDK_TOPDIR)/src/share/classes/sun/management/manifest
-
-$(JDK_OUTPUTDIR)/lib/management-agent.jar : $(JDK_TOPDIR)/src/share/classes/sun/management/manifest
- $(JAR) cfm $@ $(JDK_TOPDIR)/src/share/classes/sun/management/manifest
-
-JARS += $(JDK_OUTPUTDIR)/lib/management-agent.jar
-
-##########################################################################################
-
ifndef OPENJDK
-$(eval $(call SetupJavaCompilation,BUILD_ALTCLASSES_JAR,\
+ $(eval $(call SetupJavaCompilation,BUILD_ALTCLASSES,\
SETUP:=GENERATE_JDKBYTECODE,\
SRC:=$(JDK_TOPDIR)/src/closed/share/altclasses, \
- BIN:=$(JDK_OUTPUTDIR)/altclasses_classes,\
- JAR:=$(JDK_OUTPUTDIR)/lib/alt-rt.jar))
+ BIN:=$(JDK_OUTPUTDIR)/altclasses_classes))
-$(BUILD_ALTCLASSES_JAR): $(BUILD_JDK)
-JARS += $(JDK_OUTPUTDIR)/lib/alt-rt.jar
+ $(BUILD_ALTCLASSES): $(BUILD_JDK)
endif
@@ -384,7 +355,7 @@
SERVER_DIR:=$(SJAVAC_SERVER_DIR),\
SERVER_JVM:=$(SJAVAC_SERVER_JAVA)))
-$(eval $(call SetupJavaCompilation,BUILD_JOBJC_JAR,\
+$(eval $(call SetupJavaCompilation,BUILD_JOBJC,\
SETUP:=GENERATE_15BYTECODE,\
DISABLE_SJAVAC:=true,\
SRC:=$(JDK_TOPDIR)/src/macosx/native/jobjc/src/core/java \
@@ -396,11 +367,9 @@
JAR:=$(JDK_OUTPUTDIR)/lib/JObjC.jar, \
JARINDEX := true))
-$(BUILD_JOBJC_JAR) : $(BUILD_JDK)
+$(BUILD_JOBJC) : $(BUILD_JDK)
-JARS += $(JDK_OUTPUTDIR)/lib/JObjC.jar
-
-$(eval $(call SetupJavaCompilation,BUILD_JOBJC_HEADERS_JAR,\
+$(eval $(call SetupJavaCompilation,BUILD_JOBJC_HEADERS,\
SETUP:=GENERATE_JDKBYTECODE,\
SRC:=$(JDK_TOPDIR)/src/macosx/native/jobjc/src/core/java \
$(JDK_TOPDIR)/src/macosx/native/jobjc/src/runtime-additions/java \
@@ -410,16 +379,15 @@
BIN:=$(JDK_OUTPUTDIR)/jobjc_classes_headers,\
HEADERS:=$(JDK_OUTPUTDIR)/gensrc_headers_jobjc))
-$(BUILD_JOBJC_HEADERS_JAR) : $(BUILD_JDK)
-
-JARS += $(BUILD_JOBJC_HEADERS_JAR)
+$(BUILD_JOBJC_HEADERS) : $(BUILD_JDK)
endif
##########################################################################################
# copy with -a to preserve timestamps so dependencies down the line aren't messed up
-all: $(BUILD_JDK) $(JARS) $(COPY_EXTRA) $(JDK_OUTPUTDIR)/classes/META-INF/services/com.sun.tools.xjc.Plugin \
+all: $(BUILD_JDK) $(BUILD_ALTCLASSES) $(BUILD_JOBJC) $(BUILD_JOBJC_HEADERS) $(COPY_EXTRA) \
+ $(JDK_OUTPUTDIR)/classes/META-INF/services/com.sun.tools.xjc.Plugin \
$(JDK_OUTPUTDIR)/gensrc_headers/_the.jdk.base.headers
.PHONY: all
--- a/jdk/makefiles/CompileNativeLibraries.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/makefiles/CompileNativeLibraries.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -125,7 +125,7 @@
LIBVERIFY_OPTIMIZATION:=HIGH
ifneq ($(findstring $(OPENJDK_TARGET_OS),solaris linux),)
- ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
+ ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
LIBVERIFY_OPTIMIZATION:=LOW
endif
endif
@@ -1645,7 +1645,7 @@
LIBMANAGEMENT_OPTIMIZATION:=HIGH
ifneq ($(findstring $(OPENJDK_TARGET_OS),solaris linux),)
- ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
+ ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
LIBMANAGEMENT_OPTIMIZATION:=LOW
endif
endif
@@ -1688,7 +1688,7 @@
LIBHPROF_OPTIMIZATION:=HIGHEST
ifneq ($(findstring $(OPENJDK_TARGET_OS),solaris linux),)
- ifeq ($(ENABLE_DEBUG_SYMBOLS), yes)
+ ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
LIBHPROF_OPTIMIZATION:=LOW
endif
endif
@@ -2236,7 +2236,7 @@
$(call SET_SHARED_LIBRARY_ORIGIN),\
LDFLAGS_SUFFIX_linux:=-lc -lpthread,\
LDFLAGS_SUFFIX_windows:=$(WIN_JAVA_LIB) advapi32.lib user32.lib version.lib, \
- LDFLAGS_SUFFIX:=-lm $(LDFLAGS_JDKLIB_SUFFIX),\
+ LDFLAGS_SUFFIX_posix:=-lm -ljava -ljvm,\
VERSIONINFO_RESOURCE:=$(JDK_TOPDIR)/src/closed/share/native/sun/java2d/cmm/kcms/cmm.rc,\
VERSIONINFO_RESOURCE:=$(JDK_TOPDIR)/src/closed/share/native/sun/java2d/cmm/kcms/cmm.rc,\
RC_FLAGS:=$(RC_FLAGS)\
@@ -2540,6 +2540,7 @@
##########################################################################################
ifneq ($(OPENJDK_TARGET_OS), windows)
+ifneq ($(OPENJDK_TARGET_OS), macosx)
$(eval $(call SetupNativeCompilation,BUILD_LIBJ2GSS,\
LIBRARY:=j2gss,\
OUTPUT_DIR:=$(INSTALL_LIBRARIES_HERE),\
@@ -2559,6 +2560,7 @@
BUILD_LIBRARIES += $(BUILD_LIBJ2GSS)
endif
+endif
##########################################################################################
--- a/jdk/makefiles/CreateJars.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/makefiles/CreateJars.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -446,9 +446,7 @@
$(SUNPKCS11_JAR_DST) : $(SUNPKCS11_JAR_SRC)
@$(ECHO) $(LOG_INFO) "\n>>>Installing prebuilt SunPKCS11 provider..."
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
else
@@ -476,9 +474,7 @@
$(SUNEC_JAR_DST) : $(SUNEC_JAR_SRC)
@$(ECHO) $(LOG_INFO) "\n>>>Installing prebuilt SunEC provider..."
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
else
@@ -518,9 +514,8 @@
$(SUNJCE_PROVIDER_JAR_DST) : $(SUNJCE_PROVIDER_JAR_SRC)
@$(ECHO) $(LOG_INFO) "\n>>>Installing prebuilt SunJCE provider..."
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
+
else
$(eval $(call SetupArchive,BUILD_SUNJCE_PROVIDER_JAR,,\
@@ -545,9 +540,7 @@
$(JCE_JAR_DST) : $(JCE_JAR_SRC)
@$(ECHO) $(LOG_INFO) "\n>>>Installing prebuilt jce.jar..."
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
else
@@ -567,75 +560,85 @@
##########################################################################################
-ifdef OPENJDK
+US_EXPORT_POLICY_JAR_DST := $(IMAGES_OUTPUTDIR)/lib/security/US_export_policy.jar
+
+ifndef OPENJDK
+
+
+ $(US_EXPORT_POLICY_JAR_DST): $(JDK_TOPDIR)/make/closed/tools/crypto/jce/US_export_policy.jar
+ $(ECHO) $(LOG_INFO) Copying $(@F)
+ $(install-file)
+
+else
-#
-# TODO fix so that SetupArchive does not write files into SRCS
-# then we don't need this extra copying
-#
-# NOTE: We currently do not place restrictions on our limited export
-# policy. This was not a typo.
-#
-US_EXPORT_POLICY_JAR_DST := $(IMAGES_OUTPUTDIR)/lib/security/US_export_policy.jar
-US_EXPORT_POLICY_JAR_SRC_DIR := $(JDK_TOPDIR)/make/javax/crypto/policy/unlimited
-US_EXPORT_POLICY_JAR_TMP := $(IMAGES_OUTPUTDIR)/US_export_policy_jar.tmp
+ #
+ # TODO fix so that SetupArchive does not write files into SRCS
+ # then we don't need this extra copying
+ #
+ # NOTE: We currently do not place restrictions on our limited export
+ # policy. This was not a typo.
+ #
+ US_EXPORT_POLICY_JAR_SRC_DIR := $(JDK_TOPDIR)/make/javax/crypto/policy/unlimited
+ US_EXPORT_POLICY_JAR_TMP := $(IMAGES_OUTPUTDIR)/US_export_policy_jar.tmp
-$(US_EXPORT_POLICY_JAR_TMP)/% : $(US_EXPORT_POLICY_JAR_SRC_DIR)/%
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(US_EXPORT_POLICY_JAR_TMP)/% : $(US_EXPORT_POLICY_JAR_SRC_DIR)/%
+ $(install-file)
-US_EXPORT_POLICY_JAR_DEPS := $(US_EXPORT_POLICY_JAR_TMP)/default_US_export.policy
+ US_EXPORT_POLICY_JAR_DEPS := $(US_EXPORT_POLICY_JAR_TMP)/default_US_export.policy
-$(eval $(call SetupArchive,BUILD_US_EXPORT_POLICY_JAR,$(US_EXPORT_POLICY_JAR_DEPS),\
+ $(eval $(call SetupArchive,BUILD_US_EXPORT_POLICY_JAR,$(US_EXPORT_POLICY_JAR_DEPS),\
SRCS:=$(US_EXPORT_POLICY_JAR_TMP), \
SUFFIXES:= .policy,\
JAR:=$(US_EXPORT_POLICY_JAR_DST), \
EXTRA_MANIFEST_ATTR := Crypto-Strength: unlimited, \
SKIP_METAINF := true))
-JARS += $(US_EXPORT_POLICY_JAR_DST)
+endif
-endif
+JARS += $(US_EXPORT_POLICY_JAR_DST)
##########################################################################################
+LOCAL_POLICY_JAR_DST := $(IMAGES_OUTPUTDIR)/lib/security/local_policy.jar
-ifdef OPENJDK
+ifndef OPENJDK
+
+ $(LOCAL_POLICY_JAR_DST): $(JDK_TOPDIR)/make/closed/tools/crypto/jce/local_policy.jar
+ $(ECHO) $(LOG_INFO) Copying $(@F)
+ $(install-file)
-#
-# TODO fix so that SetupArchive does not write files into SRCS
-# then we don't need this extra copying
-#
-LOCAL_POLICY_JAR_DST := $(IMAGES_OUTPUTDIR)/lib/security/local_policy.jar
-LOCAL_POLICY_JAR_TMP := $(IMAGES_OUTPUTDIR)/local_policy_jar.tmp
+else
+
+ #
+ # TODO fix so that SetupArchive does not write files into SRCS
+ # then we don't need this extra copying
+ #
+ LOCAL_POLICY_JAR_TMP := $(IMAGES_OUTPUTDIR)/local_policy_jar.tmp
-ifeq ($(UNLIMITED_CRYPTO), true)
- LOCAL_POLICY_JAR_SRC_DIR := $(JDK_TOPDIR)/make/javax/crypto/policy/unlimited
- LOCAL_POLICY_JAR_DEPS := $(LOCAL_POLICY_JAR_TMP)/default_local.policy
- LOCAL_POLICY_JAR_ATTR := Crypto-Strength: unlimited
-else
- LOCAL_POLICY_JAR_SRC_DIR := $(JDK_TOPDIR)/make/javax/crypto/policy/limited
- LOCAL_POLICY_JAR_DEPS := $(LOCAL_POLICY_JAR_TMP)/exempt_local.policy \
- $(LOCAL_POLICY_JAR_TMP)/default_local.policy
- LOCAL_POLICY_JAR_ATTR := Crypto-Strength: limited
-endif
+ ifeq ($(UNLIMITED_CRYPTO), true)
+ LOCAL_POLICY_JAR_SRC_DIR := $(JDK_TOPDIR)/make/javax/crypto/policy/unlimited
+ LOCAL_POLICY_JAR_DEPS := $(LOCAL_POLICY_JAR_TMP)/default_local.policy
+ LOCAL_POLICY_JAR_ATTR := Crypto-Strength: unlimited
+ else
+ LOCAL_POLICY_JAR_SRC_DIR := $(JDK_TOPDIR)/make/javax/crypto/policy/limited
+ LOCAL_POLICY_JAR_DEPS := $(LOCAL_POLICY_JAR_TMP)/exempt_local.policy \
+ $(LOCAL_POLICY_JAR_TMP)/default_local.policy
+ LOCAL_POLICY_JAR_ATTR := Crypto-Strength: limited
+ endif
-$(LOCAL_POLICY_JAR_TMP)/% : $(LOCAL_POLICY_JAR_SRC_DIR)/%
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(LOCAL_POLICY_JAR_TMP)/% : $(LOCAL_POLICY_JAR_SRC_DIR)/%
+ $(install-file)
-$(eval $(call SetupArchive,BUILD_LOCAL_POLICY_JAR,$(LOCAL_POLICY_JAR_DEPS),\
+ $(eval $(call SetupArchive,BUILD_LOCAL_POLICY_JAR,$(LOCAL_POLICY_JAR_DEPS),\
SRCS:=$(LOCAL_POLICY_JAR_TMP),\
SUFFIXES:= .policy,\
JAR:=$(LOCAL_POLICY_JAR_DST), \
EXTRA_MANIFEST_ATTR := $(LOCAL_POLICY_JAR_ATTR), \
SKIP_METAINF := true))
-JARS += $(LOCAL_POLICY_JAR_DST)
+endif
-endif
+JARS += $(LOCAL_POLICY_JAR_DST)
##########################################################################################
@@ -648,9 +651,7 @@
$(SUNMSCAPI_JAR_DST) : $(SUNMSCAPI_JAR_SRC)
@$(ECHO) $(LOG_INFO) "\n>>>Installing prebuilt SunMSCAPI provider..."
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
else
@@ -676,9 +677,7 @@
$(UCRYPTO_JAR_DST) : $(UCRYPTO_JAR_SRC)
@$(ECHO) $(LOG_INFO) "\n>>>Installing prebuilt OracleUcrypto provider..."
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
JARS += $(UCRYPTO_JAR_DST)
@@ -897,14 +896,10 @@
$(LAUNCHER_SRC_FILES)))
$(IMAGES_OUTPUTDIR)/src/launcher/%: $(JDK_TOPDIR)/src/share/bin/%
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
$(IMAGES_OUTPUTDIR)/src/launcher/%: $(JDK_TOPDIR)/src/$(OPENJDK_TARGET_OS_API_DIR)/bin/%
- $(MKDIR) -p $(@D)
- $(RM) $@
- $(CP) $< $@
+ $(install-file)
$(IMAGES_OUTPUTDIR)/src.zip: $(LAUNCHER_ZIP_SRC)
@@ -922,6 +917,55 @@
##########################################################################################
+#
+# This is an empty jar (it only contains a manifest) and fits poorly into the framework,
+# so create a simple rule for it instead
+#
+$(IMAGES_OUTPUTDIR)/lib/management-agent.jar : $(JDK_TOPDIR)/src/share/classes/sun/management/manifest
+ $(JAR) cfm $@ $<
+
+JARS += $(IMAGES_OUTPUTDIR)/lib/management-agent.jar
+
+##########################################################################################
+
+$(IMAGES_OUTPUTDIR)/lib/ext/zipfs.jar : $(JDK_OUTPUTDIR)/demo/nio/zipfs/zipfs.jar
+ $(install-file)
+
+JARS += $(IMAGES_OUTPUTDIR)/lib/ext/zipfs.jar
+
+##########################################################################################
+
+ifeq ($(OPENJDK_TARGET_OS),macosx)
+ $(eval $(call SetupArchive,BUILD_JOBJC_JAR,,\
+ SRCS:=$(JDK_OUTPUTDIR)/jobjc_classes,\
+ JAR:=$(IMAGES_OUTPUTDIR)/lib/JObjC.jar, \
+ JARINDEX:=true))
+
+ JARS += $(IMAGES_OUTPUTDIR)/lib/JObjC.jar
+endif
+
+##########################################################################################
+
+ifndef OPENJDK
+ $(eval $(call SetupArchive,BUILD_ALT_RT_JAR,,\
+ SRCS:=$(JDK_OUTPUTDIR)/altclasses_classes,\
+ JAR:=$(IMAGES_OUTPUTDIR)/lib/alt-rt.jar))
+
+ JARS += $(IMAGES_OUTPUTDIR)/lib/alt-rt.jar
+endif
+
+##########################################################################################
+
+# This file is imported from hotspot in Import.gmk. It is copied into images/lib so that
+# all jars can be found in one place when creating images in Images.gmk. This needs to be
+# done here so that the clean targets can stay simple and accurate.
+$(IMAGES_OUTPUTDIR)/lib/sa-jdi.jar: $(JDK_OUTPUTDIR)/lib/sa-jdi.jar
+ $(install-file)
+
+JARS += $(IMAGES_OUTPUTDIR)/lib/sa-jdi.jar
+
+##########################################################################################
+
-include $(CUSTOM_MAKE_DIR)/CreateJars.gmk
##########################################################################################
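For context only (not part of this changeset): which jurisdiction policy pair ends up installed as US_export_policy.jar and local_policy.jar can be observed at runtime. A minimal sketch, assuming a standard JCE setup; the class name is made up for illustration:

    import javax.crypto.Cipher;

    public class CryptoPolicyCheck {
        public static void main(String[] args) throws Exception {
            // With the unlimited policy jars installed this reports Integer.MAX_VALUE.
            int max = Cipher.getMaxAllowedKeyLength("AES");
            System.out.println(max == Integer.MAX_VALUE
                    ? "unlimited crypto policy installed"
                    : "limited policy: AES keys up to " + max + " bits");
        }
    }
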
--- a/jdk/makefiles/Images.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/makefiles/Images.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -211,10 +211,11 @@
endif
# Find all files to copy from $(JDK_OUTPUTDIR)/lib
+# Jar files are not expected to be here
ALL_JDKOUT_LIB_LIST := $(shell $(FIND) $(JDK_OUTPUTDIR)/lib \( -type f -o -type l \) -a ! \
- \( -name "_the*" -o -name "javac_state " \) )
+ \( -name "_the*" -o -name "javac_state " -o -name "*.jar" \) )
# Find all files to copy from $(IMAGES_OUTPUTDIR)/lib
-# This might not exist if building overlay-images
+# This is where the jar files are, and it might not exist when building overlay-images
ifneq ($(wildcard $(IMAGES_OUTPUTDIR)/lib),)
ALL_IMAGES_LIB_LIST := $(shell $(FIND) $(IMAGES_OUTPUTDIR)/lib \( -type f -o -type l \) -a ! \
\( -name "_the*" -o -name "javac_state " \) )
--- a/jdk/makefiles/Import.gmk Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/makefiles/Import.gmk Mon Dec 17 08:30:06 2012 -0500
@@ -231,22 +231,6 @@
endef
endif
-ifndef OPENJDK
-
-IMPORT_TARGET_FILES += \
- $(JDK_OUTPUTDIR)/lib/security/US_export_policy.jar \
- $(JDK_OUTPUTDIR)/lib/security/local_policy.jar
-
-$(JDK_OUTPUTDIR)/lib/security/local_policy.jar: $(JDK_TOPDIR)/make/closed/tools/crypto/jce/local_policy.jar
- $(ECHO) $(LOG_INFO) Copying $(@F)
- $(install-file)
-
-$(JDK_OUTPUTDIR)/lib/security/US_export_policy.jar: $(JDK_TOPDIR)/make/closed/tools/crypto/jce/US_export_policy.jar
- $(ECHO) $(LOG_INFO) Copying $(@F)
- $(install-file)
-
-endif # OPENJDK
-
#######
all: $(IMPORT_TARGET_FILES)
--- a/jdk/src/macosx/classes/com/apple/laf/AquaLookAndFeel.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/com/apple/laf/AquaLookAndFeel.java Mon Dec 17 08:30:06 2012 -0500
@@ -714,7 +714,8 @@
"PopupMenu.font", menuFont,
"PopupMenu.background", menuBackgroundColor,
- "PopupMenu.translucentBackground", translucentWhite,
+ // Fix for 7154516: make popups opaque
+ "PopupMenu.translucentBackground", white,
"PopupMenu.foreground", menuForegroundColor,
"PopupMenu.selectionBackground", menuSelectedBackgroundColor,
"PopupMenu.selectionForeground", menuSelectedForegroundColor,
--- a/jdk/src/macosx/classes/sun/awt/CGraphicsConfig.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/awt/CGraphicsConfig.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,13 +31,16 @@
import sun.java2d.SurfaceData;
import sun.java2d.opengl.CGLLayer;
+import sun.lwawt.LWGraphicsConfig;
import sun.lwawt.macosx.CPlatformView;
-public class CGraphicsConfig extends GraphicsConfiguration {
+public abstract class CGraphicsConfig extends GraphicsConfiguration
+ implements LWGraphicsConfig {
+
private final CGraphicsDevice device;
private ColorModel colorModel;
- public CGraphicsConfig(CGraphicsDevice device) {
+ protected CGraphicsConfig(CGraphicsDevice device) {
this.device = device;
}
@@ -84,88 +87,20 @@
return new AffineTransform(xscale, 0.0, 0.0, yscale, 0.0, 0.0);
}
-
- /**
- * The following methods are invoked from CToolkit.java and
- * LWWindowPeer.java rather than having the native
- * implementations hardcoded in those classes. This way the appropriate
- * actions are taken based on the peer's GraphicsConfig, whether it is
- * an CGLGraphicsConfig or something else.
- */
-
/**
* Creates a new SurfaceData that will be associated with the given
* LWWindowPeer.
*/
- public SurfaceData createSurfaceData(CPlatformView pView) {
- throw new UnsupportedOperationException("not implemented");
- }
+ public abstract SurfaceData createSurfaceData(CPlatformView pView);
/**
* Creates a new SurfaceData that will be associated with the given
* CGLLayer.
*/
- public SurfaceData createSurfaceData(CGLLayer layer) {
- throw new UnsupportedOperationException("not implemented");
- }
-
- /**
- * Creates a new hidden-acceleration image of the given width and height
- * that is associated with the target Component.
- */
- public Image createAcceleratedImage(Component target,
- int width, int height)
- {
- throw new UnsupportedOperationException("not implemented");
- }
-
- /**
- * The following methods correspond to the multibuffering methods in
- * LWWindowPeer.java...
- */
-
- /**
- * Attempts to create a native backbuffer for the given peer. If
- * the requested configuration is not natively supported, an AWTException
- * is thrown. Otherwise, if the backbuffer creation is successful, a
- * handle to the native backbuffer is returned.
- */
- public long createBackBuffer(CPlatformView pView,
- int numBuffers, BufferCapabilities caps)
- throws AWTException
- {
- throw new UnsupportedOperationException("not implemented");
- }
-
- public void destroyBackBuffer(long backBuffer)
- throws AWTException
- {
- throw new UnsupportedOperationException("not implemented");
- }
-
- /**
- * Creates a VolatileImage that essentially wraps the target Component's
- * backbuffer, using the provided backbuffer handle.
- */
- public VolatileImage createBackBufferImage(Component target,
- long backBuffer)
- {
- throw new UnsupportedOperationException("not implemented");
- }
-
- /**
- * Performs the native flip operation for the given target Component.
- */
- public void flip(CPlatformView delegate,
- Component target, VolatileImage xBackBuffer,
- int x1, int y1, int x2, int y2,
- BufferCapabilities.FlipContents flipAction)
- {
- throw new UnsupportedOperationException("not implemented");
- }
+ public abstract SurfaceData createSurfaceData(CGLLayer layer);
@Override
- public boolean isTranslucencyCapable() {
+ public final boolean isTranslucencyCapable() {
//we know for sure we have capable config :)
return true;
}
--- a/jdk/src/macosx/classes/sun/java2d/opengl/CGLGraphicsConfig.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/java2d/opengl/CGLGraphicsConfig.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,6 @@
import java.awt.AWTException;
import java.awt.BufferCapabilities;
-import java.awt.Color;
import java.awt.Component;
import java.awt.Graphics;
import java.awt.Graphics2D;
@@ -48,13 +47,10 @@
import sun.awt.CGraphicsConfig;
import sun.awt.CGraphicsDevice;
-import sun.awt.TextureSizeConstraining;
import sun.awt.image.OffScreenImage;
import sun.awt.image.SunVolatileImage;
-import sun.awt.image.SurfaceManager;
import sun.java2d.Disposer;
import sun.java2d.DisposerRecord;
-import sun.java2d.SunGraphics2D;
import sun.java2d.Surface;
import sun.java2d.SurfaceData;
import sun.java2d.opengl.OGLContext.OGLContextCaps;
@@ -63,18 +59,19 @@
import sun.java2d.pipe.hw.ContextCapabilities;
import static sun.java2d.opengl.OGLSurfaceData.*;
import static sun.java2d.opengl.OGLContext.OGLContextCaps.*;
-import sun.java2d.opengl.CGLSurfaceData.CGLVSyncOffScreenSurfaceData;
import sun.java2d.pipe.hw.AccelDeviceEventListener;
import sun.java2d.pipe.hw.AccelDeviceEventNotifier;
+import sun.lwawt.LWComponentPeer;
import sun.lwawt.macosx.CPlatformView;
-public class CGLGraphicsConfig extends CGraphicsConfig
- implements OGLGraphicsConfig, TextureSizeConstraining
+public final class CGLGraphicsConfig extends CGraphicsConfig
+ implements OGLGraphicsConfig
{
- //private static final int kOpenGLSwapInterval = RuntimeOptions.getCurrentOptions().OpenGLSwapInterval;
+ //private static final int kOpenGLSwapInterval =
+ // RuntimeOptions.getCurrentOptions().OpenGLSwapInterval;
private static final int kOpenGLSwapInterval = 0; // TODO
- protected static boolean cglAvailable;
+ private static boolean cglAvailable;
private static ImageCapabilities imageCaps = new CGLImageCaps();
private int pixfmt;
@@ -82,7 +79,7 @@
private long pConfigInfo;
private ContextCapabilities oglCaps;
private OGLContext context;
- private Object disposerReferent = new Object();
+ private final Object disposerReferent = new Object();
public static native int getDefaultPixFmt(int screennum);
private static native boolean initCGL();
@@ -94,7 +91,7 @@
cglAvailable = initCGL();
}
- protected CGLGraphicsConfig(CGraphicsDevice device, int pixfmt,
+ private CGLGraphicsConfig(CGraphicsDevice device, int pixfmt,
long configInfo, ContextCapabilities oglCaps)
{
super(device);
@@ -170,11 +167,13 @@
* Returns true if the provided capability bit is present for this config.
* See OGLContext.java for a list of supported capabilities.
*/
- public final boolean isCapPresent(int cap) {
+ @Override
+ public boolean isCapPresent(int cap) {
return ((oglCaps.getCaps() & cap) != 0);
}
- public final long getNativeConfigInfo() {
+ @Override
+ public long getNativeConfigInfo() {
return pConfigInfo;
}
@@ -183,7 +182,8 @@
*
* @see sun.java2d.pipe.hw.BufferedContextProvider#getContext
*/
- public final OGLContext getContext() {
+ @Override
+ public OGLContext getContext() {
return context;
}
@@ -257,145 +257,83 @@
return ("CGLGraphicsConfig[dev="+screen+",pixfmt="+pixfmt+"]");
}
-
- /**
- * The following methods are invoked from ComponentModel.java rather
- * than having the Mac OS X-dependent implementations hardcoded in that
- * class. This way the appropriate actions are taken based on the peer's
- * GraphicsConfig, whether it is a CGraphicsConfig or a
- * CGLGraphicsConfig.
- */
-
- /**
- * Creates a new SurfaceData that will be associated with the given
- * LWWindowPeer.
- */
@Override
public SurfaceData createSurfaceData(CPlatformView pView) {
return CGLSurfaceData.createData(pView);
}
- /**
- * Creates a new SurfaceData that will be associated with the given
- * CGLLayer.
- */
@Override
public SurfaceData createSurfaceData(CGLLayer layer) {
return CGLSurfaceData.createData(layer);
}
- /**
- * Creates a new hidden-acceleration image of the given width and height
- * that is associated with the target Component.
- */
@Override
public Image createAcceleratedImage(Component target,
int width, int height)
{
ColorModel model = getColorModel(Transparency.OPAQUE);
- WritableRaster wr =
- model.createCompatibleWritableRaster(width, height);
+ WritableRaster wr = model.createCompatibleWritableRaster(width, height);
return new OffScreenImage(target, model, wr,
model.isAlphaPremultiplied());
}
- /**
- * The following methods correspond to the multibuffering methods in
- * CWindowPeer.java...
- */
-
- /**
- * Attempts to create a OGL-based backbuffer for the given peer. If
- * the requested configuration is not natively supported, an AWTException
- * is thrown. Otherwise, if the backbuffer creation is successful, a
- * value of 1 is returned.
- */
@Override
- public long createBackBuffer(CPlatformView pView,
- int numBuffers, BufferCapabilities caps)
- throws AWTException
- {
- if (numBuffers > 2) {
- throw new AWTException(
- "Only double or single buffering is supported");
+ public void assertOperationSupported(final int numBuffers,
+ final BufferCapabilities caps)
+ throws AWTException {
+        // Assume this method is never called with numBuffers != 2, as 0 is
+        // unsupported, and 1 corresponds to a SingleBufferStrategy which
+        // doesn't depend on the peer. The screen is considered a separate
+        // "buffer".
+ if (numBuffers != 2) {
+ throw new AWTException("Only double buffering is supported");
}
- BufferCapabilities configCaps = getBufferCapabilities();
+ final BufferCapabilities configCaps = getBufferCapabilities();
if (!configCaps.isPageFlipping()) {
throw new AWTException("Page flipping is not supported");
}
if (caps.getFlipContents() == BufferCapabilities.FlipContents.PRIOR) {
throw new AWTException("FlipContents.PRIOR is not supported");
}
-
- // non-zero return value means backbuffer creation was successful
- // (checked in CPlatformWindow.flip(), etc.)
- return 1;
}
- /**
- * Destroys the backbuffer object represented by the given handle value.
- */
@Override
- public void destroyBackBuffer(long backBuffer) {
- }
-
- /**
- * Creates a VolatileImage that essentially wraps the target Component's
- * backbuffer (the provided backbuffer handle is essentially ignored).
- */
- @Override
- public VolatileImage createBackBufferImage(Component target,
- long backBuffer)
- {
- return new SunVolatileImage(target,
- target.getWidth(), target.getHeight(),
- Boolean.TRUE);
+ public Image createBackBuffer(final LWComponentPeer<?, ?> peer) {
+ final Rectangle r = peer.getBounds();
+        // It is possible for the component to have size 0x0; adjust it to
+        // be at least 1x1 to avoid an IAE
+ final int w = Math.max(1, r.width);
+ final int h = Math.max(1, r.height);
+ final int transparency = peer.isTranslucent() ? Transparency.TRANSLUCENT
+ : Transparency.OPAQUE;
+ return new SunVolatileImage(this, w, h, transparency, null);
}
- /**
- * Performs the native OGL flip operation for the given target Component.
- */
@Override
- public void flip(CPlatformView pView,
- Component target, VolatileImage xBackBuffer,
- int x1, int y1, int x2, int y2,
- BufferCapabilities.FlipContents flipAction)
- {
- if (flipAction == BufferCapabilities.FlipContents.COPIED) {
- SurfaceManager vsm = SurfaceManager.getManager(xBackBuffer);
- SurfaceData sd = vsm.getPrimarySurfaceData();
+ public void destroyBackBuffer(final Image backBuffer) {
+ if (backBuffer != null) {
+ backBuffer.flush();
+ }
+ }
- if (sd instanceof CGLVSyncOffScreenSurfaceData) {
- CGLVSyncOffScreenSurfaceData vsd =
- (CGLVSyncOffScreenSurfaceData)sd;
- SurfaceData bbsd = vsd.getFlipSurface();
- Graphics2D bbg =
- new SunGraphics2D(bbsd, Color.black, Color.white, null);
- try {
- bbg.drawImage(xBackBuffer, 0, 0, null);
- } finally {
- bbg.dispose();
- }
- } else {
- pView.drawImageOnPeer(xBackBuffer, x1, y1, x2, y2);
- return;
- }
- } else if (flipAction == BufferCapabilities.FlipContents.PRIOR) {
- // not supported by CGL...
- return;
+ @Override
+ public void flip(final LWComponentPeer<?, ?> peer, final Image backBuffer,
+ final int x1, final int y1, final int x2, final int y2,
+ final BufferCapabilities.FlipContents flipAction) {
+ final Graphics g = peer.getGraphics();
+ try {
+ g.drawImage(backBuffer, x1, y1, x2, y2, x1, y1, x2, y2, null);
+ } finally {
+ g.dispose();
}
-
- OGLSurfaceData.swapBuffers(pView.getAWTView());
-
if (flipAction == BufferCapabilities.FlipContents.BACKGROUND) {
- Graphics g = xBackBuffer.getGraphics();
+ final Graphics2D bg = (Graphics2D) backBuffer.getGraphics();
try {
- g.setColor(target.getBackground());
- g.fillRect(0, 0,
- xBackBuffer.getWidth(),
- xBackBuffer.getHeight());
+ bg.setBackground(peer.getBackground());
+ bg.clearRect(0, 0, backBuffer.getWidth(null),
+ backBuffer.getHeight(null));
} finally {
- g.dispose();
+ bg.dispose();
}
}
}
@@ -429,15 +367,10 @@
return imageCaps;
}
- /**
- * {@inheritDoc}
- *
- * @see sun.java2d.pipe.hw.AccelGraphicsConfig#createCompatibleVolatileImage
- */
- public VolatileImage
- createCompatibleVolatileImage(int width, int height,
- int transparency, int type)
- {
+ @Override
+ public VolatileImage createCompatibleVolatileImage(int width, int height,
+ int transparency,
+ int type) {
if (type == FLIP_BACKBUFFER || type == WINDOW || type == UNDEFINED ||
transparency == Transparency.BITMASK)
{
@@ -473,15 +406,18 @@
*
* @see sun.java2d.pipe.hw.AccelGraphicsConfig#getContextCapabilities
*/
+ @Override
public ContextCapabilities getContextCapabilities() {
return oglCaps;
}
+ @Override
public void addDeviceEventListener(AccelDeviceEventListener l) {
int screen = getDevice().getCoreGraphicsScreen();
AccelDeviceEventNotifier.addListener(l, screen);
}
+ @Override
public void removeDeviceEventListener(AccelDeviceEventListener l) {
AccelDeviceEventNotifier.removeListener(l);
}
--- a/jdk/src/macosx/classes/sun/lwawt/LWCanvasPeer.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/LWCanvasPeer.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,12 +26,9 @@
package sun.lwawt;
-import java.awt.AWTException;
-import java.awt.BufferCapabilities;
import java.awt.Component;
import java.awt.Dimension;
import java.awt.GraphicsConfiguration;
-import java.awt.Image;
import java.awt.peer.CanvasPeer;
import javax.swing.JComponent;
@@ -42,35 +39,10 @@
LWCanvasPeer(final T target, final PlatformComponent platformComponent) {
super(target, platformComponent);
}
- // ---- PEER METHODS ---- //
-
- @Override
- public void createBuffers(int numBuffers, BufferCapabilities caps)
- throws AWTException {
- // TODO
- }
-
- @Override
- public Image getBackBuffer() {
- // TODO
- return null;
- }
-
- @Override
- public void flip(int x1, int y1, int x2, int y2,
- BufferCapabilities.FlipContents flipAction) {
- // TODO
- }
-
- @Override
- public void destroyBuffers() {
- // TODO
- }
@Override
public final GraphicsConfiguration getAppropriateGraphicsConfiguration(
- GraphicsConfiguration gc)
- {
+ final GraphicsConfiguration gc) {
// TODO
return gc;
}
--- a/jdk/src/macosx/classes/sun/lwawt/LWComponentPeer.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/LWComponentPeer.java Mon Dec 17 08:30:06 2012 -0500
@@ -138,6 +138,11 @@
*/
static final char WIDE_CHAR = '0';
+ /**
+     * The back buffer provides the user with a BufferStrategy.
+ */
+ private Image backBuffer;
+
private final class DelegateContainer extends Container {
{
enableEvents(0xFFFFFFFF);
@@ -389,6 +394,7 @@
}
protected void disposeImpl() {
+ destroyBuffers();
LWContainerPeer cp = getContainerPeer();
if (cp != null) {
cp.removeChildPeer(this);
@@ -415,6 +421,12 @@
return getWindowPeer().getGraphicsConfiguration();
}
+
+ // Just a helper method
+ public final LWGraphicsConfig getLWGC() {
+ return (LWGraphicsConfig) getGraphicsConfiguration();
+ }
+
/*
* Overridden in LWWindowPeer to replace its surface
* data and back buffer.
@@ -506,31 +518,45 @@
return getGraphicsConfiguration().getColorModel();
}
- @Override
- public void createBuffers(int numBuffers, BufferCapabilities caps)
- throws AWTException {
- throw new AWTException("Back buffers are only supported for " +
- "Window or Canvas components.");
+ public boolean isTranslucent() {
+        // Translucency is supported only for top-level windows
+ return false;
}
- /*
- * To be overridden in LWWindowPeer and LWCanvasPeer.
- */
@Override
- public Image getBackBuffer() {
- // Return null or throw AWTException?
- return null;
+ public final void createBuffers(int numBuffers, BufferCapabilities caps)
+ throws AWTException {
+ getLWGC().assertOperationSupported(numBuffers, caps);
+ final Image buffer = getLWGC().createBackBuffer(this);
+ synchronized (getStateLock()) {
+ backBuffer = buffer;
+ }
}
@Override
- public void flip(int x1, int y1, int x2, int y2,
- BufferCapabilities.FlipContents flipAction) {
- // Skip silently or throw AWTException?
+ public final Image getBackBuffer() {
+ synchronized (getStateLock()) {
+ if (backBuffer != null) {
+ return backBuffer;
+ }
+ }
+ throw new IllegalStateException("Buffers have not been created");
}
@Override
- public void destroyBuffers() {
- // Do nothing
+ public final void flip(int x1, int y1, int x2, int y2,
+ BufferCapabilities.FlipContents flipAction) {
+ getLWGC().flip(this, getBackBuffer(), x1, y1, x2, y2, flipAction);
+ }
+
+ @Override
+ public final void destroyBuffers() {
+ final Image oldBB;
+ synchronized (getStateLock()) {
+ oldBB = backBuffer;
+ backBuffer = null;
+ }
+ getLWGC().destroyBackBuffer(oldBB);
}
// Helper method
@@ -642,7 +668,7 @@
}
}
- protected final Color getBackground() {
+ public final Color getBackground() {
synchronized (getStateLock()) {
return background;
}
@@ -982,19 +1008,17 @@
}
@Override
- public Image createImage(ImageProducer producer) {
+ public final Image createImage(final ImageProducer producer) {
return new ToolkitImage(producer);
}
@Override
- public Image createImage(int w, int h) {
- CGraphicsConfig gc = (CGraphicsConfig)getGraphicsConfiguration();
- return gc.createAcceleratedImage(getTarget(), w, h);
+ public final Image createImage(final int width, final int height) {
+ return getLWGC().createAcceleratedImage(getTarget(), width, height);
}
@Override
- public VolatileImage createVolatileImage(int w, int h) {
- // TODO: is it a right/complete implementation?
+ public final VolatileImage createVolatileImage(final int w, final int h) {
return new SunVolatileImage(getTarget(), w, h);
}
@@ -1105,8 +1129,6 @@
* of target.setLocation() or as a result of user actions (window is
* dragged with mouse).
*
- * To be overridden in LWWindowPeer to update its GraphicsConfig.
- *
* This method could be called on the toolkit thread.
*/
protected final void handleMove(final int x, final int y,
@@ -1122,13 +1144,19 @@
* Called when this peer's size has been changed either as a result of
* target.setSize() or as a result of user actions (window is resized).
*
- * To be overridden in LWWindowPeer to update its SurfaceData and
- * GraphicsConfig.
- *
* This method could be called on the toolkit thread.
*/
protected final void handleResize(final int w, final int h,
final boolean updateTarget) {
+ Image oldBB = null;
+ synchronized (getStateLock()) {
+ if (backBuffer != null) {
+ oldBB = backBuffer;
+ backBuffer = getLWGC().createBackBuffer(this);
+ }
+ }
+ getLWGC().destroyBackBuffer(oldBB);
+
if (updateTarget) {
AWTAccessor.getComponentAccessor().setSize(getTarget(), w, h);
}
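The peer-level createBuffers()/getBackBuffer()/flip()/destroyBuffers() methods above are what back java.awt.image.BufferStrategy on lightweight peers. A minimal client-side sketch that exercises this path (illustrative only, assuming a headful environment; the class name is hypothetical):

    import java.awt.Canvas;
    import java.awt.Color;
    import java.awt.Frame;
    import java.awt.Graphics;
    import java.awt.image.BufferStrategy;

    public class FlipDemo {
        public static void main(String[] args) {
            Frame frame = new Frame("flip demo");
            Canvas canvas = new Canvas();
            frame.add(canvas);
            frame.setSize(300, 200);
            frame.setVisible(true);

            canvas.createBufferStrategy(2);        // reaches the peer's createBuffers()
            BufferStrategy bs = canvas.getBufferStrategy();
            Graphics g = bs.getDrawGraphics();     // draws into the peer's back buffer
            try {
                g.setColor(Color.BLUE);
                g.fillRect(0, 0, canvas.getWidth(), canvas.getHeight());
            } finally {
                g.dispose();
            }
            bs.show();                             // ends up in the new flip() path
        }
    }
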
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/macosx/classes/sun/lwawt/LWGraphicsConfig.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.lwawt;
+
+import java.awt.AWTException;
+import java.awt.BufferCapabilities;
+import java.awt.Component;
+import java.awt.Image;
+
+/**
+ * As lwawt can be used on different platforms with different graphics
+ * configurations, a common set of methods is necessary. This interface
+ * collects the methods that a GraphicsConfiguration should provide,
+ * simplifying their use by LWAWT.
+ *
+ * @author Sergey Bylokhov
+ */
+public interface LWGraphicsConfig {
+
+ /*
+     * A GraphicsConfiguration must implement the following methods to indicate
+ * that it imposes certain limitations on the maximum size of supported
+ * textures.
+ */
+
+ /**
+     * Returns the maximum width of any texture image. By default returns {@code
+ * Integer.MAX_VALUE}.
+ */
+ int getMaxTextureWidth();
+
+ /**
+     * Returns the maximum height of any texture image. By default returns {@code
+ * Integer.MAX_VALUE}.
+ */
+ int getMaxTextureHeight();
+
+ /*
+ * The following methods correspond to the multi-buffering methods in
+ * LWComponentPeer.java.
+ */
+
+ /**
+ * Checks that the requested configuration is natively supported; if not, an
+ * AWTException is thrown.
+ */
+ void assertOperationSupported(int numBuffers, BufferCapabilities caps)
+ throws AWTException;
+
+ /**
+ * Creates a back buffer for the given peer and returns the image wrapper.
+ */
+ Image createBackBuffer(LWComponentPeer<?, ?> peer);
+
+ /**
+ * Destroys the back buffer object.
+ */
+ void destroyBackBuffer(Image backBuffer);
+
+ /**
+     * Performs the native flip operation for the given target Component. Our
+     * flip is implemented through a normal drawImage() onto the graphics object,
+     * because our components use the graphics object of their container (in
+     * this case we also apply the necessary constraints).
+ */
+ void flip(LWComponentPeer<?, ?> peer, Image backBuffer, int x1, int y1,
+ int x2, int y2, BufferCapabilities.FlipContents flipAction);
+
+ /**
+ * Creates a new hidden-acceleration image of the given width and height
+ * that is associated with the target Component.
+ */
+ Image createAcceleratedImage(Component target, int width, int height);
+}
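A minimal sketch of the blit-style flip that the LWGraphicsConfig javadoc describes, shown standalone; the class and method names here are hypothetical, and the real implementation is the CGLGraphicsConfig.flip() earlier in this changeset:

    import java.awt.Graphics;
    import java.awt.Image;

    // Copies the back buffer region (x1,y1)-(x2,y2) onto the component's
    // on-screen graphics instead of performing a native buffer swap.
    final class BlitFlip {
        static void flip(Graphics screen, Image backBuffer,
                         int x1, int y1, int x2, int y2) {
            try {
                screen.drawImage(backBuffer, x1, y1, x2, y2, x1, y1, x2, y2, null);
            } finally {
                screen.dispose();
            }
        }
    }
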
--- a/jdk/src/macosx/classes/sun/lwawt/LWWindowPeer.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/LWWindowPeer.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,6 @@
import java.awt.*;
import java.awt.event.*;
-import java.awt.image.BufferedImage;
import java.awt.peer.*;
import java.util.List;
@@ -75,17 +74,6 @@
private SurfaceData surfaceData;
private final Object surfaceDataLock = new Object();
- private int backBufferCount;
- private BufferCapabilities backBufferCaps;
-
- // The back buffer is used for two purposes:
- // 1. To render all the lightweight peers
- // 2. To provide user with a BufferStrategy
- // Need to check if a single back buffer can be used for both
-// TODO: VolatileImage
-// private VolatileImage backBuffer;
- private volatile BufferedImage backBuffer;
-
private volatile int windowState = Frame.NORMAL;
// check that the mouse is over the window
@@ -227,7 +215,6 @@
if (isGrabbing()) {
ungrab();
}
- destroyBuffers();
platformWindow.dispose();
super.disposeImpl();
}
@@ -258,8 +245,10 @@
}
@Override
- public GraphicsConfiguration getGraphicsConfiguration() {
- return graphicsConfig;
+ public final GraphicsConfiguration getGraphicsConfiguration() {
+ synchronized (getStateLock()) {
+ return graphicsConfig;
+ }
}
@Override
@@ -285,48 +274,6 @@
}
@Override
- public void createBuffers(int numBuffers, BufferCapabilities caps)
- throws AWTException
- {
- try {
- // Assume this method is never called with numBuffers <= 1, as 0 is
- // unsupported, and 1 corresponds to a SingleBufferStrategy which
- // doesn't depend on the peer. Screen is considered as a separate
- // "buffer", that's why numBuffers - 1
- assert numBuffers > 1;
-
- replaceSurfaceData(numBuffers - 1, caps, false);
- } catch (InvalidPipeException z) {
- throw new AWTException(z.toString());
- }
- }
-
- @Override
- public final Image getBackBuffer() {
- synchronized (getStateLock()) {
- return backBuffer;
- }
- }
-
- @Override
- public void flip(int x1, int y1, int x2, int y2,
- BufferCapabilities.FlipContents flipAction)
- {
- platformWindow.flip(x1, y1, x2, y2, flipAction);
- }
-
- @Override
- public final void destroyBuffers() {
- final Image oldBB = getBackBuffer();
- synchronized (getStateLock()) {
- backBuffer = null;
- }
- if (oldBB != null) {
- oldBB.flush();
- }
- }
-
- @Override
public void setBounds(int x, int y, int w, int h, int op) {
if ((op & SET_CLIENT_SIZE) != 0) {
// SET_CLIENT_SIZE is only applicable to window peers, so handle it here
@@ -343,16 +290,14 @@
h = MINIMUM_HEIGHT;
}
- if (graphicsConfig instanceof TextureSizeConstraining) {
- final int maxW = ((TextureSizeConstraining)graphicsConfig).getMaxTextureWidth();
- final int maxH = ((TextureSizeConstraining)graphicsConfig).getMaxTextureHeight();
+ final int maxW = getLWGC().getMaxTextureWidth();
+ final int maxH = getLWGC().getMaxTextureHeight();
- if (w > maxW) {
- w = maxW;
- }
- if (h > maxH) {
- h = maxH;
- }
+ if (w > maxW) {
+ w = maxW;
+ }
+ if (h > maxH) {
+ h = maxH;
}
// Don't post ComponentMoved/Resized and Paint events
@@ -431,21 +376,14 @@
min = new Dimension(MINIMUM_WIDTH, MINIMUM_HEIGHT);
}
- final int maxW, maxH;
- if (graphicsConfig instanceof TextureSizeConstraining) {
- maxW = ((TextureSizeConstraining)graphicsConfig).getMaxTextureWidth();
- maxH = ((TextureSizeConstraining)graphicsConfig).getMaxTextureHeight();
- } else {
- maxW = maxH = Integer.MAX_VALUE;
- }
-
final Dimension max;
if (getTarget().isMaximumSizeSet()) {
max = getTarget().getMaximumSize();
- max.width = Math.min(max.width, maxW);
- max.height = Math.min(max.height, maxH);
+ max.width = Math.min(max.width, getLWGC().getMaxTextureWidth());
+ max.height = Math.min(max.height, getLWGC().getMaxTextureHeight());
} else {
- max = new Dimension(maxW, maxH);
+ max = new Dimension(getLWGC().getMaxTextureWidth(),
+ getLWGC().getMaxTextureHeight());
}
platformWindow.setSizeConstraints(min.width, min.height, max.width, max.height);
@@ -1014,21 +952,10 @@
replaceSurfaceData(true);
}
- private void replaceSurfaceData(boolean blit) {
- replaceSurfaceData(backBufferCount, backBufferCaps, blit);
- }
-
- private void replaceSurfaceData(int newBackBufferCount,
- BufferCapabilities newBackBufferCaps,
- boolean blit) {
+ private void replaceSurfaceData(final boolean blit) {
synchronized (surfaceDataLock) {
final SurfaceData oldData = getSurfaceData();
surfaceData = platformWindow.replaceSurfaceData();
- // TODO: volatile image
- // VolatileImage oldBB = backBuffer;
- BufferedImage oldBB = backBuffer;
- backBufferCount = newBackBufferCount;
- backBufferCaps = newBackBufferCaps;
final Rectangle size = getSize();
if (getSurfaceData() != null && oldData != getSurfaceData()) {
clearBackground(size.width, size.height);
@@ -1043,35 +970,6 @@
// This can only happen when this peer is being created
oldData.flush();
}
-
- // TODO: volatile image
- // backBuffer = (VolatileImage)delegate.createBackBuffer();
- backBuffer = (BufferedImage) platformWindow.createBackBuffer();
- if (backBuffer != null) {
- Graphics g = backBuffer.getGraphics();
- try {
- Rectangle r = getBounds();
- if (g instanceof Graphics2D) {
- ((Graphics2D) g).setComposite(AlphaComposite.Src);
- }
- g.setColor(nonOpaqueBackground);
- g.fillRect(0, 0, r.width, r.height);
- if (g instanceof SunGraphics2D) {
- SG2DConstraint((SunGraphics2D) g, getRegion());
- }
- if (!isTextured()) {
- g.setColor(getBackground());
- g.fillRect(0, 0, r.width, r.height);
- }
- if (oldBB != null) {
- // Draw the old back buffer to the new one
- g.drawImage(oldBB, 0, 0, null);
- oldBB.flush();
- }
- } finally {
- g.dispose();
- }
- }
}
}
@@ -1092,14 +990,6 @@
}
}
- public int getBackBufferCount() {
- return backBufferCount;
- }
-
- public BufferCapabilities getBackBufferCaps() {
- return backBufferCaps;
- }
-
/*
* Request the window insets from the delegate and compares it
* with the current one. This method is mostly called by the
--- a/jdk/src/macosx/classes/sun/lwawt/PlatformWindow.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/PlatformWindow.java Mon Dec 17 08:30:06 2012 -0500
@@ -97,17 +97,6 @@
*/
public SurfaceData replaceSurfaceData();
- /*
- * Creates a new image to serve as a back buffer.
- */
- public Image createBackBuffer();
-
- /*
- * Move the given part of the back buffer to the front buffer.
- */
- public void flip(int x1, int y1, int x2, int y2,
- BufferCapabilities.FlipContents flipAction);
-
public void setModalBlocked(boolean blocked);
public void toFront();
--- a/jdk/src/macosx/classes/sun/lwawt/macosx/CPlatformEmbeddedFrame.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/macosx/CPlatformEmbeddedFrame.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,12 +31,9 @@
import sun.java2d.opengl.CGLLayer;
import sun.java2d.SurfaceData;
-import sun.awt.CGraphicsConfig;
-import sun.awt.CGraphicsDevice;
import sun.awt.CausedFocusEvent;
import java.awt.*;
-import java.awt.BufferCapabilities.FlipContents;
import sun.util.logging.PlatformLogger;
@@ -113,22 +110,6 @@
}
@Override
- public Image createBackBuffer() {
- Rectangle r = peer.getBounds();
- Image im = null;
- if (!r.isEmpty()) {
- int transparency = peer.isTranslucent() ? Transparency.TRANSLUCENT : Transparency.OPAQUE;
- im = peer.getGraphicsConfiguration().createCompatibleImage(r.width, r.height, transparency);
- }
- return im;
- }
-
- @Override
- public void flip(int x1, int y1, int x2, int y2, FlipContents flipAction) {
- throw new RuntimeException("Not implemented");
- }
-
- @Override
public void setVisible(boolean visible) {}
@Override
--- a/jdk/src/macosx/classes/sun/lwawt/macosx/CPlatformView.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/macosx/CPlatformView.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,6 @@
package sun.lwawt.macosx;
import java.awt.*;
-import java.awt.image.VolatileImage;
import sun.awt.CGraphicsConfig;
import sun.lwawt.LWWindowPeer;
@@ -115,26 +114,6 @@
// ----------------------------------------------------------------------
// PAINTING METHODS
// ----------------------------------------------------------------------
-
- public void drawImageOnPeer(VolatileImage xBackBuffer, int x1, int y1, int x2, int y2) {
- Graphics g = peer.getGraphics();
- try {
- g.drawImage(xBackBuffer, x1, y1, x2, y2, x1, y1, x2, y2, null);
- } finally {
- g.dispose();
- }
- }
-
- public Image createBackBuffer() {
- Rectangle r = peer.getBounds();
- Image im = null;
- if (!r.isEmpty()) {
- int transparency = (isOpaque() ? Transparency.OPAQUE : Transparency.TRANSLUCENT);
- im = peer.getGraphicsConfiguration().createCompatibleImage(r.width, r.height, transparency);
- }
- return im;
- }
-
public SurfaceData replaceSurfaceData() {
if (!LWCToolkit.getSunAwtDisableCALayers()) {
surfaceData = windowLayer.replaceSurfaceData();
--- a/jdk/src/macosx/classes/sun/lwawt/macosx/CPlatformWindow.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/classes/sun/lwawt/macosx/CPlatformWindow.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,6 @@
package sun.lwawt.macosx;
-import java.awt.BufferCapabilities.FlipContents;
import java.awt.*;
import java.awt.Dialog.ModalityType;
import java.awt.event.*;
@@ -258,7 +257,7 @@
validateSurface();
}
- protected int getInitialStyleBits() {
+ private int getInitialStyleBits() {
// defaults style bits
int styleBits = DECORATED | HAS_SHADOW | CLOSEABLE | MINIMIZABLE | ZOOMABLE | RESIZABLE;
@@ -285,7 +284,6 @@
final boolean resizable = isFrame ? ((Frame)target).isResizable() : (isDialog ? ((Dialog)target).isResizable() : false);
styleBits = SET(styleBits, RESIZABLE, resizable);
if (!resizable) {
- styleBits = SET(styleBits, RESIZABLE, false);
styleBits = SET(styleBits, ZOOMABLE, false);
}
}
@@ -380,7 +378,7 @@
}
// this is the counter-point to -[CWindow _nativeSetStyleBit:]
- protected void setStyleBits(final int mask, final boolean value) {
+ private void setStyleBits(final int mask, final boolean value) {
nativeSetNSWindowStyleBits(getNSWindowPtr(), mask, value ? mask : 0);
}
@@ -402,11 +400,6 @@
}
@Override // PlatformWindow
- public Image createBackBuffer() {
- return contentView.createBackBuffer();
- }
-
- @Override // PlatformWindow
public void dispose() {
if (owner != null) {
CWrapper.NSWindow.removeChildWindow(owner.getNSWindowPtr(), getNSWindowPtr());
@@ -417,12 +410,6 @@
}
@Override // PlatformWindow
- public void flip(int x1, int y1, int x2, int y2, FlipContents flipAction) {
- // TODO: not implemented
- (new RuntimeException("unimplemented")).printStackTrace();
- }
-
- @Override // PlatformWindow
public FontMetrics getFontMetrics(Font f) {
// TODO: not implemented
(new RuntimeException("unimplemented")).printStackTrace();
@@ -668,15 +655,8 @@
}
@Override
- public void setResizable(boolean resizable) {
+ public void setResizable(final boolean resizable) {
setStyleBits(RESIZABLE, resizable);
-
- // Re-apply the size constraints and the size to ensure the space
- // occupied by the grow box is counted properly
- peer.updateMinimumSize();
-
- Rectangle bounds = peer.getBounds();
- setBounds(bounds.x, bounds.y, bounds.width, bounds.height);
}
@Override
@@ -889,7 +869,8 @@
responder.handleWindowFocusEvent(gained, oppositePeer);
}
- private void deliverMoveResizeEvent(int x, int y, int width, int height) {
+ private void deliverMoveResizeEvent(int x, int y, int width, int height,
+ boolean byUser) {
// when the content view enters the full-screen mode, the native
// move/resize notifications contain a bounds smaller than
// the whole screen and therefore we ignore the native notifications
@@ -901,7 +882,7 @@
final Rectangle oldB = nativeBounds;
nativeBounds = new Rectangle(x, y, width, height);
peer.notifyReshape(x, y, width, height);
- if (!oldB.getSize().equals(nativeBounds.getSize()) ) {
+ if (byUser && !oldB.getSize().equals(nativeBounds.getSize())) {
flushBuffers();
}
//TODO validateSurface already called from notifyReshape
--- a/jdk/src/macosx/native/sun/awt/AWTWindow.m Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/macosx/native/sun/awt/AWTWindow.m Mon Dec 17 08:30:06 2012 -0500
@@ -160,6 +160,10 @@
BOOL resizable = IS(bits, RESIZABLE);
[self updateMinMaxSize:resizable];
[self.nsWindow setShowsResizeIndicator:resizable];
+        // The zoom button should be disabled if the window is not resizable;
+        // otherwise the button should be restored to its initial state.
+ BOOL zoom = resizable && IS(bits, ZOOMABLE);
+ [[self.nsWindow standardWindowButton:NSWindowZoomButton] setEnabled:zoom];
}
if (IS(mask, HAS_SHADOW)) {
@@ -445,12 +449,13 @@
NSRect frame = ConvertNSScreenRect(env, [self.nsWindow frame]);
- static JNF_MEMBER_CACHE(jm_deliverMoveResizeEvent, jc_CPlatformWindow, "deliverMoveResizeEvent", "(IIII)V");
+ static JNF_MEMBER_CACHE(jm_deliverMoveResizeEvent, jc_CPlatformWindow, "deliverMoveResizeEvent", "(IIIIZ)V");
JNFCallVoidMethod(env, platformWindow, jm_deliverMoveResizeEvent,
(jint)frame.origin.x,
(jint)frame.origin.y,
(jint)frame.size.width,
- (jint)frame.size.height);
+ (jint)frame.size.height,
+ (jboolean)[self.nsWindow inLiveResize]);
(*env)->DeleteLocalRef(env, platformWindow);
}
@@ -784,7 +789,7 @@
// calls methods on NSWindow to change other properties, based on the mask
if (mask & MASK(_METHOD_PROP_BITMASK)) {
- [window setPropertiesForStyleBits:bits mask:mask];
+ [window setPropertiesForStyleBits:newBits mask:mask];
}
window.styleBits = newBits;
--- a/jdk/src/share/classes/com/sun/imageio/plugins/gif/GIFImageReader.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/imageio/plugins/gif/GIFImageReader.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,14 +30,10 @@
import java.awt.image.BufferedImage;
import java.awt.image.DataBuffer;
import java.awt.image.WritableRaster;
-import java.io.BufferedInputStream;
-import java.io.DataInputStream;
import java.io.EOFException;
-import java.io.InputStream;
import java.io.IOException;
import java.nio.ByteOrder;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import javax.imageio.IIOException;
@@ -48,6 +44,11 @@
import javax.imageio.spi.ImageReaderSpi;
import javax.imageio.stream.ImageInputStream;
import com.sun.imageio.plugins.common.ReaderUtil;
+import java.awt.image.ColorModel;
+import java.awt.image.IndexColorModel;
+import java.awt.image.MultiPixelPackedSampleModel;
+import java.awt.image.PixelInterleavedSampleModel;
+import java.awt.image.SampleModel;
public class GIFImageReader extends ImageReader {
@@ -194,6 +195,36 @@
return imageMetadata.imageHeight;
}
+    // We don't check all parameters as ImageTypeSpecifier.createIndexed does,
+    // since this method is private and we pass consistent data here
+ private ImageTypeSpecifier createIndexed(byte[] r, byte[] g, byte[] b,
+ int bits) {
+ ColorModel colorModel;
+ if (imageMetadata.transparentColorFlag) {
+ // Some files erroneously have a transparent color index
+ // of 255 even though there are fewer than 256 colors.
+ int idx = Math.min(imageMetadata.transparentColorIndex,
+ r.length - 1);
+ colorModel = new IndexColorModel(bits, r.length, r, g, b, idx);
+ } else {
+ colorModel = new IndexColorModel(bits, r.length, r, g, b);
+ }
+
+ SampleModel sampleModel;
+ if (bits == 8) {
+ int[] bandOffsets = {0};
+ sampleModel =
+ new PixelInterleavedSampleModel(DataBuffer.TYPE_BYTE,
+ 1, 1, 1, 1,
+ bandOffsets);
+ } else {
+ sampleModel =
+ new MultiPixelPackedSampleModel(DataBuffer.TYPE_BYTE,
+ 1, 1, bits);
+ }
+ return new ImageTypeSpecifier(colorModel, sampleModel);
+ }
+
public Iterator getImageTypes(int imageIndex) throws IIOException {
checkIndex(imageIndex);
@@ -239,22 +270,7 @@
b[i] = colorTable[rgbIndex++];
}
- byte[] a = null;
- if (imageMetadata.transparentColorFlag) {
- a = new byte[lutLength];
- Arrays.fill(a, (byte)255);
-
- // Some files erroneously have a transparent color index
- // of 255 even though there are fewer than 256 colors.
- int idx = Math.min(imageMetadata.transparentColorIndex,
- lutLength - 1);
- a[idx] = (byte)0;
- }
-
- int[] bitsPerSample = new int[1];
- bitsPerSample[0] = bits;
- l.add(ImageTypeSpecifier.createIndexed(r, g, b, a, bits,
- DataBuffer.TYPE_BYTE));
+ l.add(createIndexed(r, g, b, bits));
return l.iterator();
}
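An illustrative sketch (not from the changeset) of the transparent-index clamping that the new createIndexed() helper performs: a palette whose metadata claims transparent index 255 ends up with the last valid entry marked transparent. All names and values here are made up:

    import java.awt.Transparency;
    import java.awt.image.IndexColorModel;

    public class TransparentIndexClamp {
        public static void main(String[] args) {
            byte[] r = {0, (byte) 255, 0, 0};
            byte[] g = {0, 0, (byte) 255, 0};
            byte[] b = {0, 0, 0, (byte) 255};
            int claimed = 255;                         // bogus index from the stream
            int idx = Math.min(claimed, r.length - 1); // clamp to the last entry
            IndexColorModel cm = new IndexColorModel(2, r.length, r, g, b, idx);
            System.out.println(cm.getTransparentPixel());                     // 3
            System.out.println(cm.getTransparency() == Transparency.BITMASK); // true
        }
    }
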
--- a/jdk/src/share/classes/com/sun/java/util/jar/pack/PropMap.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/java/util/jar/pack/PropMap.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,8 +25,6 @@
package com.sun.java.util.jar.pack;
-import java.beans.PropertyChangeListener;
-import java.beans.PropertyChangeEvent;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
@@ -42,40 +40,39 @@
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.jar.Pack200;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
/**
* Control block for publishing Pack200 options to the other classes.
*/
final class PropMap implements SortedMap<String, String> {
private final TreeMap<String, String> theMap = new TreeMap<>();;
- private final List<PropertyChangeListener> listenerList = new ArrayList<>(1);
- void addListener(PropertyChangeListener listener) {
+ // type is erased, elements are of type java.beans.PropertyChangeListener
+ private final List<Object> listenerList = new ArrayList<>(1);
+
+ void addListener(Object listener) {
+ assert Beans.isPropertyChangeListener(listener);
listenerList.add(listener);
}
- void removeListener(PropertyChangeListener listener) {
+ void removeListener(Object listener) {
+ assert Beans.isPropertyChangeListener(listener);
listenerList.remove(listener);
}
- void addListeners(ArrayList<PropertyChangeListener> listeners) {
- listenerList.addAll(listeners);
- }
-
- void removeListeners(ArrayList<PropertyChangeListener> listeners) {
- listenerList.removeAll(listeners);
- }
-
// Override:
public String put(String key, String value) {
String oldValue = theMap.put(key, value);
if (value != oldValue && !listenerList.isEmpty()) {
+ assert Beans.isBeansPresent();
// Post the property change event.
- PropertyChangeEvent event =
- new PropertyChangeEvent(this, key,
- oldValue, value);
- for (PropertyChangeListener listener : listenerList) {
- listener.propertyChange(event);
+ Object event = Beans.newPropertyChangeEvent(this, key, oldValue, value);
+ for (Object listener : listenerList) {
+ Beans.invokePropertyChange(listener, event);
}
}
return oldValue;
@@ -339,4 +336,113 @@
public String lastKey() {
return theMap.lastKey();
}
+
+ /**
+ * A class that provides access to the java.beans.PropertyChangeListener
+ * and java.beans.PropertyChangeEvent without creating a static dependency
+ * on java.beans. This class can be removed once the addPropertyChangeListener
+ * and removePropertyChangeListener methods are removed from Packer and
+ * Unpacker.
+ */
+ private static class Beans {
+ private static final Class<?> propertyChangeListenerClass =
+ getClass("java.beans.PropertyChangeListener");
+
+ private static final Class<?> propertyChangeEventClass =
+ getClass("java.beans.PropertyChangeEvent");
+
+ private static final Method propertyChangeMethod =
+ getMethod(propertyChangeListenerClass,
+ "propertyChange",
+ propertyChangeEventClass);
+
+ private static final Constructor<?> propertyEventCtor =
+ getConstructor(propertyChangeEventClass,
+ Object.class,
+ String.class,
+ Object.class,
+ Object.class);
+
+ private static Class<?> getClass(String name) {
+ try {
+ return Class.forName(name, true, Beans.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ return null;
+ }
+ }
+ private static Constructor<?> getConstructor(Class<?> c, Class<?>... types) {
+ try {
+ return (c == null) ? null : c.getDeclaredConstructor(types);
+ } catch (NoSuchMethodException x) {
+ throw new AssertionError(x);
+ }
+ }
+
+ private static Method getMethod(Class<?> c, String name, Class<?>... types) {
+ try {
+ return (c == null) ? null : c.getMethod(name, types);
+ } catch (NoSuchMethodException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ /**
+ * Returns {@code true} if java.beans is present.
+ */
+ static boolean isBeansPresent() {
+ return propertyChangeListenerClass != null &&
+ propertyChangeEventClass != null;
+ }
+
+ /**
+ * Returns {@code true} if the given object is a PropertyChangeListener
+ */
+ static boolean isPropertyChangeListener(Object obj) {
+ if (propertyChangeListenerClass == null) {
+ return false;
+ } else {
+ return propertyChangeListenerClass.isInstance(obj);
+ }
+ }
+
+ /**
+ * Returns a new PropertyChangeEvent with the given source, property
+ * name, old and new values.
+ */
+ static Object newPropertyChangeEvent(Object source, String prop,
+ Object oldValue, Object newValue)
+ {
+ try {
+ return propertyEventCtor.newInstance(source, prop, oldValue, newValue);
+ } catch (InstantiationException | IllegalAccessException x) {
+ throw new AssertionError(x);
+ } catch (InvocationTargetException x) {
+ Throwable cause = x.getCause();
+ if (cause instanceof Error)
+ throw (Error)cause;
+ if (cause instanceof RuntimeException)
+ throw (RuntimeException)cause;
+ throw new AssertionError(x);
+ }
+ }
+
+ /**
+ * Invokes the given PropertyChangeListener's propertyChange method
+ * with the given event.
+ */
+ static void invokePropertyChange(Object listener, Object ev) {
+ try {
+ propertyChangeMethod.invoke(listener, ev);
+ } catch (IllegalAccessException x) {
+ throw new AssertionError(x);
+ } catch (InvocationTargetException x) {
+ Throwable cause = x.getCause();
+ if (cause instanceof Error)
+ throw (Error)cause;
+ if (cause instanceof RuntimeException)
+ throw (RuntimeException)cause;
+ throw new AssertionError(x);
+ }
+ }
+ }
}
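The Beans nested class above follows a common optional-dependency pattern: resolve the class reflectively once and treat "not present" as a normal state. A stripped-down sketch of the same idea, with hypothetical names:

    final class OptionalBeans {
        private static final Class<?> LISTENER_CLASS =
                lookup("java.beans.PropertyChangeListener");

        private static Class<?> lookup(String name) {
            try {
                return Class.forName(name, true, OptionalBeans.class.getClassLoader());
            } catch (ClassNotFoundException e) {
                return null;            // java.beans is simply not available
            }
        }

        static boolean isBeansPresent() {
            return LISTENER_CLASS != null;
        }

        public static void main(String[] args) {
            System.out.println("java.beans present: " + isBeansPresent());
        }
    }
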
--- a/jdk/src/share/classes/com/sun/net/ssl/KeyManagerFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/net/ssl/KeyManagerFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,12 +53,13 @@
/**
* <p>The default KeyManager can be changed by setting the value of the
- * "sun.ssl.keymanager.type" security property (in the Java security
- * properties file) to the desired name.
+ * {@code sun.ssl.keymanager.type} security property to the desired name.
*
- * @return the default type as specified in the
- * Java security properties file, or an implementation-specific default
- * if no such property exists.
+ * @return the default type as specified by the
+ * {@code sun.ssl.keymanager.type} security property, or an
+ * implementation-specific default if no such property exists.
+ *
+ * @see java.security.Security security properties
*/
public final static String getDefaultAlgorithm() {
String type;
--- a/jdk/src/share/classes/com/sun/net/ssl/TrustManagerFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/net/ssl/TrustManagerFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,12 +53,13 @@
/**
* <p>The default TrustManager can be changed by setting the value of the
- * "sun.ssl.trustmanager.type" security property
- * (in the Java security properties file) to the desired name.
+ * {@code sun.ssl.trustmanager.type} security property to the desired name.
*
- * @return the default type as specified in the
- * Java security properties file, or an implementation-specific default
- * if no such property exists.
+ * @return the default type as specified by the
+ * {@code sun.ssl.trustmanager.type} security property, or an
+ * implementation-specific default if no such property exists.
+ *
+ * @see java.security.Security security properties
*/
public final static String getDefaultAlgorithm() {
String type;
--- a/jdk/src/share/classes/com/sun/rowset/internal/CachedRowSetWriter.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/rowset/internal/CachedRowSetWriter.java Mon Dec 17 08:30:06 2012 -0500
@@ -264,7 +264,7 @@
* <code>false</code> otherwise
*/
public boolean writeData(RowSetInternal caller) throws SQLException {
- boolean conflict = false;
+ long conflicts = 0;
boolean showDel = false;
PreparedStatement pstmtIns = null;
iChangedValsInDbAndCRS = 0;
@@ -337,8 +337,9 @@
while (crs.next()) {
if (crs.rowDeleted()) {
// The row has been deleted.
- if (conflict = (deleteOriginalRow(crs, this.crsResolve)) == true) {
+ if (deleteOriginalRow(crs, this.crsResolve)) {
status.add(rows, SyncResolver.DELETE_ROW_CONFLICT);
+ conflicts++;
} else {
// delete happened without any occurrence of conflicts
// so update status accordingly
@@ -349,8 +350,9 @@
// The row has been inserted.
pstmtIns = con.prepareStatement(insertCmd);
- if ( (conflict = insertNewRow(crs, pstmtIns, this.crsResolve)) == true) {
+ if (insertNewRow(crs, pstmtIns, this.crsResolve)) {
status.add(rows, SyncResolver.INSERT_ROW_CONFLICT);
+ conflicts++;
} else {
// insert happened without any occurrence of conflicts
// so update status accordingly
@@ -358,8 +360,9 @@
}
} else if (crs.rowUpdated()) {
// The row has been updated.
- if ( conflict = (updateOriginalRow(crs)) == true) {
+ if (updateOriginalRow(crs)) {
status.add(rows, SyncResolver.UPDATE_ROW_CONFLICT);
+ conflicts++;
} else {
// update happened without any occurrence of conflicts
// so update status accordingly
@@ -395,21 +398,12 @@
// reset
crs.setShowDeleted(showDel);
- boolean boolConf = false;
- for (int j=1;j<status.size();j++){
- // ignore status for index = 0 which is set to null
- if(! ((status.get(j)).equals(SyncResolver.NO_ROW_CONFLICT))) {
- // there is at least one conflict which needs to be resolved
- boolConf = true;
- break;
- }
- }
-
crs.beforeFirst();
this.crsResolve.beforeFirst();
- if(boolConf) {
- SyncProviderException spe = new SyncProviderException(status.size() - 1+resBundle.handleGetObject("crswriter.conflictsno").toString());
+ if(conflicts != 0) {
+ SyncProviderException spe = new SyncProviderException(conflicts + " " +
+ resBundle.handleGetObject("crswriter.conflictsno").toString());
//SyncResolver syncRes = spe.getSyncResolver();
SyncResolverImpl syncResImpl = (SyncResolverImpl) spe.getSyncResolver();
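A hedged sketch of how a caller might observe the change: the SyncProviderException message now carries the exact conflict count, and each conflicting row remains reachable through the SyncResolver. The class name and printed text below are illustrative only:

    import java.sql.SQLException;
    import javax.sql.rowset.CachedRowSet;
    import javax.sql.rowset.spi.SyncProviderException;
    import javax.sql.rowset.spi.SyncResolver;

    final class ConflictHandler {
        static void accept(CachedRowSet crs) throws SQLException {
            try {
                crs.acceptChanges();
            } catch (SyncProviderException spe) {
                // Message is "<count> " + the localized conflict text
                System.out.println(spe.getMessage());
                SyncResolver resolver = spe.getSyncResolver();
                while (resolver.nextConflict()) {
                    System.out.println("row " + resolver.getRow()
                            + " status " + resolver.getStatus());
                }
            }
        }
    }
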
--- a/jdk/src/share/classes/com/sun/security/auth/PolicyFile.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/security/auth/PolicyFile.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,13 +59,9 @@
*
* <ol>
* <li>
- * Loop through the <code>java.security.Security</code> properties,
+ * Loop through the security properties,
* <i>auth.policy.url.1</i>, <i>auth.policy.url.2</i>, ...,
- * <i>auth.policy.url.X</i>". These properties are set
- * in the Java security properties file, which is located in the file named
- * <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * <i>auth.policy.url.X</i>".
* Each property value specifies a <code>URL</code> pointing to a
* policy file to be loaded. Read in and load each policy.
*
@@ -235,6 +231,7 @@
* @see java.security.CodeSource
* @see java.security.Permissions
* @see java.security.ProtectionDomain
+ * @see java.security.Security security properties
*/
@Deprecated
public class PolicyFile extends javax.security.auth.Policy {
--- a/jdk/src/share/classes/com/sun/security/auth/login/ConfigFile.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/security/auth/login/ConfigFile.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,13 +49,9 @@
*
* <ol>
* <li>
- * Loop through the <code>java.security.Security</code> properties,
+ * Loop through the security properties,
* <i>login.config.url.1</i>, <i>login.config.url.2</i>, ...,
- * <i>login.config.url.X</i>. These properties are set
- * in the Java security properties file, which is located in the file named
- * <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * <i>login.config.url.X</i>.
* Each property value specifies a <code>URL</code> pointing to a
* login configuration file to be loaded. Read in and load
* each configuration.
@@ -87,6 +83,7 @@
* <code>javax.security.auth.login.Configuration</code> class.
*
* @see javax.security.auth.login.LoginContext
+ * @see java.security.Security security properties
*/
public class ConfigFile extends javax.security.auth.login.Configuration {
--- a/jdk/src/share/classes/com/sun/security/auth/module/Krb5LoginModule.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/com/sun/security/auth/module/Krb5LoginModule.java Mon Dec 17 08:30:06 2012 -0500
@@ -1067,10 +1067,6 @@
if (ktab != null) {
if (!privCredSet.contains(ktab)) {
privCredSet.add(ktab);
- // Compatibility; also add keys to privCredSet
- for (KerberosKey key: ktab.getKeys(kerbClientPrinc)) {
- privCredSet.add(new Krb5Util.KeysFromKeyTab(key));
- }
}
} else {
succeeded = false;
--- a/jdk/src/share/classes/java/awt/color/ICC_Profile.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/awt/color/ICC_Profile.java Mon Dec 17 08:30:06 2012 -0500
@@ -1435,7 +1435,15 @@
int renderingIntent = intFromBigEndian(theHeader, icHdrRenderingIntent);
/* set the rendering intent */
- return renderingIntent;
+
+ /* According to ICC spec, only the least-significant 16 bits shall be
+ * used to encode the rendering intent. The most significant 16 bits
+ * shall be set to zero. Thus, we are ignoring the two most significant
+ * bytes here.
+ *
+ * See http://www.color.org/ICC1v42_2006-05.pdf, section 7.2.15.
+ */
+ return (0xffff & renderingIntent);
}
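
The effect of the new masking is easy to check in isolation: only the low-order 16 bits of the header word carry the rendering intent, so masking the full big-endian int (ICC_Profile above) and assembling only the two low bytes (ColorConvertOp below) yield the same value. A self-contained sketch with made-up header bytes:

    public class RenderingIntentSketch {
        public static void main(String[] args) {
            // Four big-endian bytes at the rendering-intent header offset; the two
            // high bytes are reserved and may be non-zero in malformed profiles.
            byte[] header = { (byte) 0xde, (byte) 0xad, 0x00, 0x01 };

            int full = ((header[0] & 0xff) << 24) | ((header[1] & 0xff) << 16)
                     | ((header[2] & 0xff) << 8)  |  (header[3] & 0xff);

            int masked       = 0xffff & full;                                  // ICC_Profile
            int fromLowBytes = ((header[2] & 0xff) << 8) | (header[3] & 0xff); // ColorConvertOp

            System.out.println(masked == fromLowBytes); // true
            System.out.println(masked);                 // 1
        }
    }
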
--- a/jdk/src/share/classes/java/awt/image/ColorConvertOp.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/awt/image/ColorConvertOp.java Mon Dec 17 08:30:06 2012 -0500
@@ -732,10 +732,16 @@
private int getRenderingIntent (ICC_Profile profile) {
byte[] header = profile.getData(ICC_Profile.icSigHead);
int index = ICC_Profile.icHdrRenderingIntent;
- return (((header[index] & 0xff) << 24) |
- ((header[index+1] & 0xff) << 16) |
- ((header[index+2] & 0xff) << 8) |
- (header[index+3] & 0xff));
+
+ /* According to ICC spec, only the least-significant 16 bits shall be
+ * used to encode the rendering intent. The most significant 16 bits
+ * shall be set to zero. Thus, we are ignoring the two most significant
+ * bytes here.
+ *
+ * See http://www.color.org/ICC1v42_2006-05.pdf, section 7.2.15.
+ */
+ return ((header[index+2] & 0xff) << 8) |
+ (header[index+3] & 0xff);
}
/**
--- a/jdk/src/share/classes/java/lang/ThreadLocal.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/lang/ThreadLocal.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,19 +25,21 @@
package java.lang;
import java.lang.ref.*;
+import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
/**
* This class provides thread-local variables. These variables differ from
* their normal counterparts in that each thread that accesses one (via its
- * <tt>get</tt> or <tt>set</tt> method) has its own, independently initialized
- * copy of the variable. <tt>ThreadLocal</tt> instances are typically private
+ * {@code get} or {@code set} method) has its own, independently initialized
+ * copy of the variable. {@code ThreadLocal} instances are typically private
* static fields in classes that wish to associate state with a thread (e.g.,
* a user ID or Transaction ID).
*
* <p>For example, the class below generates unique identifiers local to each
* thread.
- * A thread's id is assigned the first time it invokes <tt>ThreadId.get()</tt>
+ * A thread's id is assigned the first time it invokes {@code ThreadId.get()}
* and remains unchanged on subsequent calls.
* <pre>
* import java.util.concurrent.atomic.AtomicInteger;
@@ -61,7 +63,7 @@
* }
* </pre>
* <p>Each thread holds an implicit reference to its copy of a thread-local
- * variable as long as the thread is alive and the <tt>ThreadLocal</tt>
+ * variable as long as the thread is alive and the {@code ThreadLocal}
* instance is accessible; after a thread goes away, all of its copies of
* thread-local instances are subject to garbage collection (unless other
* references to these copies exist).
@@ -108,14 +110,14 @@
* thread-local variable. This method will be invoked the first
* time a thread accesses the variable with the {@link #get}
* method, unless the thread previously invoked the {@link #set}
- * method, in which case the <tt>initialValue</tt> method will not
+ * method, in which case the {@code initialValue} method will not
* be invoked for the thread. Normally, this method is invoked at
* most once per thread, but it may be invoked again in case of
* subsequent invocations of {@link #remove} followed by {@link #get}.
*
- * <p>This implementation simply returns <tt>null</tt>; if the
+ * <p>This implementation simply returns {@code null}; if the
* programmer desires thread-local variables to have an initial
- * value other than <tt>null</tt>, <tt>ThreadLocal</tt> must be
+ * value other than {@code null}, {@code ThreadLocal} must be
* subclassed, and this method overridden. Typically, an
* anonymous inner class will be used.
*
@@ -126,7 +128,21 @@
}
/**
+ * Creates a thread local variable. The initial value of the variable is
+ * determined by invoking the {@code get} method on the {@code Supplier}.
+ *
+ * @param supplier the supplier to be used to determine the initial value
+ * @return a new thread local variable
+ * @throws NullPointerException if the specified supplier is null
+ * @since 1.8
+ */
+ public static <T> ThreadLocal<T> withInitial(Supplier<? extends T> supplier) {
+ return new SuppliedThreadLocal<>(supplier);
+ }
+
+ /**
* Creates a thread local variable.
+ * @see #withInitial(java.util.function.Supplier)
*/
public ThreadLocal() {
}
@@ -195,7 +211,7 @@
* reinitialized by invoking its {@link #initialValue} method,
* unless its value is {@linkplain #set set} by the current thread
* in the interim. This may result in multiple invocations of the
- * <tt>initialValue</tt> method in the current thread.
+ * {@code initialValue} method in the current thread.
*
* @since 1.5
*/
@@ -251,6 +267,24 @@
}
/**
+ * An extension of ThreadLocal that obtains its initial value from
+ * the specified {@code Supplier}.
+ */
+ static final class SuppliedThreadLocal<T> extends ThreadLocal<T> {
+
+ private final Supplier<? extends T> supplier;
+
+ SuppliedThreadLocal(Supplier<? extends T> supplier) {
+ this.supplier = Objects.requireNonNull(supplier);
+ }
+
+ @Override
+ protected T initialValue() {
+ return supplier.get();
+ }
+ }
+
+ /**
* ThreadLocalMap is a customized hash map suitable only for
* maintaining thread local values. No operations are exported
* outside of the ThreadLocal class. The class is package private to
@@ -599,9 +633,9 @@
* @param i a position known NOT to hold a stale entry. The
* scan starts at the element after i.
*
- * @param n scan control: <tt>log2(n)</tt> cells are scanned,
+ * @param n scan control: {@code log2(n)} cells are scanned,
* unless a stale entry is found, in which case
- * <tt>log2(table.length)-1</tt> additional cells are scanned.
+ * {@code log2(table.length)-1} additional cells are scanned.
* When called from insertions, this parameter is the number
* of elements, but when from replaceStaleEntry, it is the
* table length. (Note: all this could be changed to be either
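
The new withInitial factory shortens the ThreadId example from the class comment, since the initial value can come from a Supplier instead of an anonymous subclass. A minimal usage sketch:

    import java.util.concurrent.atomic.AtomicInteger;

    public class ThreadIdSketch {
        private static final AtomicInteger NEXT_ID = new AtomicInteger(0);

        // Replaces the anonymous "new ThreadLocal<Integer>() { initialValue() ... }" idiom.
        private static final ThreadLocal<Integer> THREAD_ID =
                ThreadLocal.withInitial(NEXT_ID::getAndIncrement);

        public static void main(String[] args) throws InterruptedException {
            Runnable r = () -> System.out.println(
                    Thread.currentThread().getName() + " -> " + THREAD_ID.get());
            Thread t1 = new Thread(r, "t1");
            Thread t2 = new Thread(r, "t2");
            t1.start();
            t2.start();
            t1.join();
            t2.join();
        }
    }
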
--- a/jdk/src/share/classes/java/lang/invoke/BoundMethodHandle.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/lang/invoke/BoundMethodHandle.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
package java.lang.invoke;
-import static com.sun.xml.internal.ws.org.objectweb.asm.Opcodes.*;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
import static java.lang.invoke.LambdaForm.basicTypes;
import static java.lang.invoke.MethodHandleNatives.Constants.REF_invokeStatic;
import static java.lang.invoke.MethodHandleStatics.*;
@@ -40,9 +40,9 @@
import sun.invoke.util.ValueConversions;
import sun.invoke.util.Wrapper;
-import com.sun.xml.internal.ws.org.objectweb.asm.ClassWriter;
-import com.sun.xml.internal.ws.org.objectweb.asm.MethodVisitor;
-import com.sun.xml.internal.ws.org.objectweb.asm.Type;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Type;
/**
* The flavor of method handle which emulates an invoke instruction
--- a/jdk/src/share/classes/java/lang/invoke/InnerClassLambdaMetafactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/lang/invoke/InnerClassLambdaMetafactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,15 +25,15 @@
package java.lang.invoke;
-import java.io.FileOutputStream;
-import java.io.IOException;
+import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.security.ProtectionDomain;
import java.util.concurrent.atomic.AtomicInteger;
-import sun.util.logging.PlatformLogger;
import jdk.internal.org.objectweb.asm.*;
import static jdk.internal.org.objectweb.asm.Opcodes.*;
import sun.misc.Unsafe;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
/**
* InnerClassLambdaMetafactory
@@ -120,13 +120,34 @@
*
* @return a CallSite, which, when invoked, will return an instance of the
* functional interface
- * @throws ReflectiveOperationException
+ * @throws ReflectiveOperationException, LambdaConversionException
*/
@Override
CallSite buildCallSite() throws ReflectiveOperationException, LambdaConversionException {
final Class<?> innerClass = spinInnerClass();
if (invokedType.parameterCount() == 0) {
- return new ConstantCallSite(MethodHandles.constant(samBase, innerClass.newInstance()));
+ final Constructor[] ctrs = AccessController.doPrivileged(
+ new PrivilegedAction<Constructor[]>() {
+ @Override
+ public Constructor[] run() {
+ return innerClass.getDeclaredConstructors();
+ }
+ });
+ if (ctrs.length != 1) {
+ throw new ReflectiveOperationException("Expected one lambda constructor for "
+ + innerClass.getCanonicalName() + ", got " + ctrs.length);
+ }
+ // The lambda implementing inner class constructor is private, set
+ // it accessible (by us) before creating the constant sole instance
+ AccessController.doPrivileged(new PrivilegedAction<Void>() {
+ @Override
+ public Void run() {
+ ctrs[0].setAccessible(true);
+ return null;
+ }
+ });
+ Object inst = ctrs[0].newInstance();
+ return new ConstantCallSite(MethodHandles.constant(samBase, inst));
} else {
return new ConstantCallSite(
MethodHandles.Lookup.IMPL_LOOKUP
@@ -144,7 +165,7 @@
private <T> Class<? extends T> spinInnerClass() throws LambdaConversionException {
String samName = samBase.getName().replace('.', '/');
- cw.visit(CLASSFILE_VERSION, ACC_PUBLIC + ACC_SUPER, lambdaClassName, null, NAME_MAGIC_ACCESSOR_IMPL,
+ cw.visit(CLASSFILE_VERSION, ACC_SUPER, lambdaClassName, null, NAME_MAGIC_ACCESSOR_IMPL,
isSerializable ? new String[]{samName, NAME_SERIALIZABLE} : new String[]{samName});
// Generate final fields to be filled in by constructor
@@ -186,17 +207,27 @@
final byte[] classBytes = cw.toByteArray();
- if (System.getProperty("debug.dump.generated") != null) {
+ /*** Uncomment to dump the generated file
System.out.printf("Loaded: %s (%d bytes) %n", lambdaClassName, classBytes.length);
try (FileOutputStream fos = new FileOutputStream(lambdaClassName.replace('/', '.') + ".class")) {
fos.write(classBytes);
} catch (IOException ex) {
- PlatformLogger.getLogger(InnerClassLambdaMetafactory.class.getName()).severe(ex.getMessage(), ex);
+ Logger.getLogger(InnerClassLambdaMetafactory.class.getName()).log(Level.SEVERE, null, ex);
}
- }
+ ***/
ClassLoader loader = targetClass.getClassLoader();
- ProtectionDomain pd = (loader == null) ? null : targetClass.getProtectionDomain();
+ ProtectionDomain pd = (loader == null)
+ ? null
+ : AccessController.doPrivileged(
+ new PrivilegedAction<ProtectionDomain>() {
+ @Override
+ public ProtectionDomain run() {
+ return targetClass.getProtectionDomain();
+ }
+ }
+ );
+
return (Class<? extends T>) Unsafe.getUnsafe().defineClass(lambdaClassName, classBytes, 0, classBytes.length, loader, pd);
}
@@ -205,7 +236,7 @@
*/
private void generateConstructor() {
// Generate constructor
- MethodVisitor ctor = cw.visitMethod(ACC_PUBLIC, NAME_CTOR, constructorDesc, null, null);
+ MethodVisitor ctor = cw.visitMethod(ACC_PRIVATE, NAME_CTOR, constructorDesc, null, null);
ctor.visitCode();
ctor.visitVarInsn(ALOAD, 0);
ctor.visitMethodInsn(INVOKESPECIAL, NAME_MAGIC_ACCESSOR_IMPL, NAME_CTOR, METHOD_DESCRIPTOR_VOID);
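
The constructor handling above follows a common idiom: keep the privileged reflection to the smallest possible action, then make the sole private constructor accessible and invoke it. A stand-alone sketch of that idiom against a hypothetical holder class (not the metafactory itself):

    import java.lang.reflect.Constructor;
    import java.security.AccessController;
    import java.security.PrivilegedAction;

    public class SoleInstanceSketch {
        // Hypothetical target with a single private no-arg constructor.
        static final class Holder {
            private Holder() { }
        }

        public static void main(String[] args) throws ReflectiveOperationException {
            final Constructor<?>[] ctrs = AccessController.doPrivileged(
                    new PrivilegedAction<Constructor<?>[]>() {
                        @Override
                        public Constructor<?>[] run() {
                            return Holder.class.getDeclaredConstructors();
                        }
                    });
            if (ctrs.length != 1) {
                throw new ReflectiveOperationException("expected exactly one constructor");
            }
            // Open the private constructor before creating the single instance.
            AccessController.doPrivileged(new PrivilegedAction<Void>() {
                @Override
                public Void run() {
                    ctrs[0].setAccessible(true);
                    return null;
                }
            });
            Object instance = ctrs[0].newInstance();
            System.out.println(instance.getClass());
        }
    }
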
--- a/jdk/src/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java Mon Dec 17 08:30:06 2012 -0500
@@ -34,7 +34,7 @@
import java.io.*;
import java.util.*;
-import com.sun.xml.internal.ws.org.objectweb.asm.*;
+import jdk.internal.org.objectweb.asm.*;
import java.lang.reflect.*;
import static java.lang.invoke.MethodHandleStatics.*;
--- a/jdk/src/share/classes/java/net/HttpCookie.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/net/HttpCookie.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,6 +30,8 @@
import java.util.NoSuchElementException;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
+import java.util.Calendar;
+import java.util.GregorianCalendar;
import java.util.Date;
import java.util.Locale;
import java.util.Objects;
@@ -89,7 +91,10 @@
private final static String[] COOKIE_DATE_FORMATS = {
"EEE',' dd-MMM-yyyy HH:mm:ss 'GMT'",
"EEE',' dd MMM yyyy HH:mm:ss 'GMT'",
- "EEE MMM dd yyyy HH:mm:ss 'GMT'Z"
+ "EEE MMM dd yyyy HH:mm:ss 'GMT'Z",
+ "EEE',' dd-MMM-yy HH:mm:ss 'GMT'",
+ "EEE',' dd MMM yy HH:mm:ss 'GMT'",
+ "EEE MMM dd yy HH:mm:ss 'GMT'Z"
};
// constant strings represent set-cookie header token
@@ -1025,13 +1030,29 @@
* specified by dateString
*/
private long expiryDate2DeltaSeconds(String dateString) {
+ Calendar cal = new GregorianCalendar(GMT);
for (int i = 0; i < COOKIE_DATE_FORMATS.length; i++) {
SimpleDateFormat df = new SimpleDateFormat(COOKIE_DATE_FORMATS[i],
Locale.US);
+ cal.set(1970, 0, 1, 0, 0, 0);
df.setTimeZone(GMT);
+ df.setLenient(false);
+ df.set2DigitYearStart(cal.getTime());
try {
- Date date = df.parse(dateString);
- return (date.getTime() - whenCreated) / 1000;
+ cal.setTime(df.parse(dateString));
+ if (!COOKIE_DATE_FORMATS[i].contains("yyyy")) {
+ // 2-digit years following the standard set
+ // out in RFC 6265
+ int year = cal.get(Calendar.YEAR);
+ year %= 100;
+ if (year < 70) {
+ year += 2000;
+ } else {
+ year += 1900;
+ }
+ cal.set(Calendar.YEAR, year);
+ }
+ return (cal.getTimeInMillis() - whenCreated) / 1000;
} catch (Exception e) {
// Ignore, try the next date format
}
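
The two-digit-year handling added above implements the rule from RFC 6265: parsed years 00-69 are taken as 2000-2069 and 70-99 as 1970-1999. A sketch of just that mapping, separate from the SimpleDateFormat plumbing:

    public class CookieYearRuleSketch {
        // RFC 6265, section 5.1.1: 2-digit years below 70 map to 20xx, the rest to 19xx.
        static int expand(int twoDigitYear) {
            int year = twoDigitYear % 100;
            return year < 70 ? year + 2000 : year + 1900;
        }

        public static void main(String[] args) {
            System.out.println(expand(37)); // 2037
            System.out.println(expand(70)); // 1970
            System.out.println(expand(99)); // 1999
        }
    }
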
--- a/jdk/src/share/classes/java/net/Inet6Address.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/net/Inet6Address.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,9 +25,9 @@
package java.net;
-import java.io.ObjectInputStream;
import java.io.IOException;
import java.io.InvalidObjectException;
+import java.io.ObjectInputStream;
import java.util.Enumeration;
/**
@@ -116,7 +116,8 @@
* <h4> Special IPv6 address </h4>
*
* <blockquote>
- * <table cellspacing=2 summary="Description of IPv4-mapped address"> <tr><th valign=top><i>IPv4-mapped address</i></th>
+ * <table cellspacing=2 summary="Description of IPv4-mapped address">
+ * <tr><th valign=top><i>IPv4-mapped address</i></th>
* <td>Of the form::ffff:w.x.y.z, this IPv6 address is used to
* represent an IPv4 address. It allows the native program to
* use the same address data structure and also the same
@@ -130,35 +131,40 @@
* address.</td></tr>
* </table></blockquote>
* <p>
- * <h4> <A NAME="scoped">Textual representation of IPv6 scoped addresses</a> </h4>
- * <p>
- * The textual representation of IPv6 addresses as described above can be extended
- * to specify IPv6 scoped addresses. This extension to the basic addressing architecture
- * is described in [draft-ietf-ipngwg-scoping-arch-04.txt].
- * <p>
- * Because link-local and site-local addresses are non-global, it is possible that different hosts
- * may have the same destination address and may be reachable through different interfaces on the
- * same originating system. In this case, the originating system is said to be connected
- * to multiple zones of the same scope. In order to disambiguate which is the intended destination
- * zone, it is possible to append a zone identifier (or <i>scope_id</i>) to an IPv6 address.
- * <p>
- * The general format for specifying the <i>scope_id</i> is the following:
+ * <h4><A NAME="scoped">Textual representation of IPv6 scoped addresses</a></h4>
+ *
+ * <p> The textual representation of IPv6 addresses as described above can be
+ * extended to specify IPv6 scoped addresses. This extension to the basic
+ * addressing architecture is described in [draft-ietf-ipngwg-scoping-arch-04.txt].
+ *
+ * <p> Because link-local and site-local addresses are non-global, it is possible
+ * that different hosts may have the same destination address and may be
+ * reachable through different interfaces on the same originating system. In
+ * this case, the originating system is said to be connected to multiple zones
+ * of the same scope. In order to disambiguate which is the intended destination
+ * zone, it is possible to append a zone identifier (or <i>scope_id</i>) to an
+ * IPv6 address.
+ *
+ * <p> The general format for specifying the <i>scope_id</i> is the following:
+ *
* <p><blockquote><i>IPv6-address</i>%<i>scope_id</i></blockquote>
* <p> The IPv6-address is a literal IPv6 address as described above.
- * The <i>scope_id</i> refers to an interface on the local system, and it can be specified
- * in two ways.
- * <p><ol><li><i>As a numeric identifier.</i> This must be a positive integer that identifies the
- * particular interface and scope as understood by the system. Usually, the numeric
- * values can be determined through administration tools on the system. Each interface may
- * have multiple values, one for each scope. If the scope is unspecified, then the default value
- * used is zero.</li><p>
- * <li><i>As a string.</i> This must be the exact string that is returned by
- * {@link java.net.NetworkInterface#getName()} for the particular interface in question.
- * When an Inet6Address is created in this way, the numeric scope-id is determined at the time
- * the object is created by querying the relevant NetworkInterface.</li>
- * </ol><p>
- * Note also, that the numeric <i>scope_id</i> can be retrieved from Inet6Address instances returned from the
- * NetworkInterface class. This can be used to find out the current scope ids configured on the system.
+ * The <i>scope_id</i> refers to an interface on the local system, and it can be
+ * specified in two ways.
+ * <p><ol><li><i>As a numeric identifier.</i> This must be a positive integer
+ * that identifies the particular interface and scope as understood by the
+ * system. Usually, the numeric values can be determined through administration
+ * tools on the system. Each interface may have multiple values, one for each
+ * scope. If the scope is unspecified, then the default value used is zero.</li>
+ * <p><li><i>As a string.</i> This must be the exact string that is returned by
+ * {@link java.net.NetworkInterface#getName()} for the particular interface in
+ * question. When an Inet6Address is created in this way, the numeric scope-id
+ * is determined at the time the object is created by querying the relevant
+ * NetworkInterface.</li></ol>
+ *
+ * <p> Note also, that the numeric <i>scope_id</i> can be retrieved from
+ * Inet6Address instances returned from the NetworkInterface class. This can be
+ * used to find out the current scope ids configured on the system.
* @since 1.4
*/
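
A short sketch of the two ways of supplying a scope described above, using the getByAddress overloads this class exposes; the interface name "eth0" is only an example and may not exist or may lack a link-local address on a given system:

    import java.net.Inet6Address;
    import java.net.NetworkInterface;
    import java.net.SocketException;
    import java.net.UnknownHostException;

    public class ScopedAddressSketch {
        public static void main(String[] args) throws UnknownHostException, SocketException {
            byte[] linkLocal = new byte[16];
            linkLocal[0] = (byte) 0xfe;   // fe80::1, a link-local address
            linkLocal[1] = (byte) 0x80;
            linkLocal[15] = 1;

            // 1. Numeric scope_id: accepted as-is, not checked against any interface.
            Inet6Address byId = Inet6Address.getByAddress(null, linkLocal, 5);
            System.out.println(byId.getHostAddress());   // fe80:0:0:0:0:0:0:1%5

            // 2. Scoped interface: the numeric id is derived from the interface, and
            //    the call fails if it has no address of the matching scope.
            NetworkInterface nif = NetworkInterface.getByName("eth0"); // example name only
            if (nif != null) {
                try {
                    Inet6Address byIf = Inet6Address.getByAddress(null, linkLocal, nif);
                    System.out.println(byIf.getHostAddress());   // ...%eth0
                } catch (UnknownHostException e) {
                    System.out.println("eth0 has no link-local IPv6 address");
                }
            }
        }
    }
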
@@ -169,7 +175,7 @@
/*
* cached scope_id - for link-local address use only.
*/
- private transient int cached_scope_id = 0;
+ private transient int cached_scope_id; // 0
/**
* Holds a 128-bit (16 bytes) IPv6 address.
@@ -179,37 +185,28 @@
byte[] ipaddress;
/**
- * scope_id. The scope specified when the object is created. If the object is created
- * with an interface name, then the scope_id is not determined until the time it is needed.
+ * scope_id. The scope specified when the object is created. If the object
+ * is created with an interface name, then the scope_id is not determined
+ * until the time it is needed.
*/
- private int scope_id = 0;
+ private int scope_id; // 0
/**
* This will be set to true when the scope_id field contains a valid
* integer scope_id.
*/
- private boolean scope_id_set = false;
+ private boolean scope_id_set; // false
/**
* scoped interface. scope_id is derived from this as the scope_id of the first
* address whose scope is the same as this address for the named interface.
*/
- private transient NetworkInterface scope_ifname = null;
-
- /**
- * set if the object is constructed with a scoped interface instead of a
- * numeric scope id.
- */
- private boolean scope_ifname_set = false;
+ private transient NetworkInterface scope_ifname; // null
private static final long serialVersionUID = 6880410070516793377L;
- /*
- * Perform initializations.
- */
- static {
- init();
- }
+ // Perform native initialization
+ static { init(); }
Inet6Address() {
super();
@@ -239,19 +236,24 @@
} catch (UnknownHostException e) {} /* cant happen if ifname is null */
}
- Inet6Address (String hostName, byte addr[], NetworkInterface nif) throws UnknownHostException {
+ Inet6Address (String hostName, byte addr[], NetworkInterface nif)
+ throws UnknownHostException
+ {
initif (hostName, addr, nif);
}
- Inet6Address (String hostName, byte addr[], String ifname) throws UnknownHostException {
+ Inet6Address (String hostName, byte addr[], String ifname)
+ throws UnknownHostException
+ {
initstr (hostName, addr, ifname);
}
/**
- * Create an Inet6Address in the exact manner of {@link InetAddress#getByAddress(String,byte[])}
- * except that the IPv6 scope_id is set to the value corresponding to the given interface
- * for the address type specified in <code>addr</code>.
- * The call will fail with an UnknownHostException if the given interface does not have a numeric
+ * Create an Inet6Address in the exact manner of {@link
+ * InetAddress#getByAddress(String,byte[])} except that the IPv6 scope_id is
+ * set to the value corresponding to the given interface for the address
+ * type specified in <code>addr</code>. The call will fail with an
+ * UnknownHostException if the given interface does not have a numeric
* scope_id assigned for the given address type (eg. link-local or site-local).
* See <a href="Inet6Address.html#scoped">here</a> for a description of IPv6
* scoped addresses.
@@ -260,14 +262,16 @@
* @param addr the raw IP address in network byte order
* @param nif an interface this address must be associated with.
* @return an Inet6Address object created from the raw IP address.
- * @exception UnknownHostException if IP address is of illegal length, or if the interface
- * does not have a numeric scope_id assigned for the given address type.
+ * @throws UnknownHostException
+ * if IP address is of illegal length, or if the interface does not
+ * have a numeric scope_id assigned for the given address type.
*
* @since 1.5
*/
-
- public static Inet6Address getByAddress(String host, byte[] addr, NetworkInterface nif)
- throws UnknownHostException {
+ public static Inet6Address getByAddress(String host, byte[] addr,
+ NetworkInterface nif)
+ throws UnknownHostException
+ {
if (host != null && host.length() > 0 && host.charAt(0) == '[') {
if (host.charAt(host.length()-1) == ']') {
host = host.substring(1, host.length() -1);
@@ -282,9 +286,10 @@
}
/**
- * Create an Inet6Address in the exact manner of {@link InetAddress#getByAddress(String,byte[])}
- * except that the IPv6 scope_id is set to the given numeric value.
- * The scope_id is not checked to determine if it corresponds to any interface on the system.
+ * Create an Inet6Address in the exact manner of {@link
+ * InetAddress#getByAddress(String,byte[])} except that the IPv6 scope_id is
+ * set to the given numeric value. The scope_id is not checked to determine
+ * if it corresponds to any interface on the system.
* See <a href="Inet6Address.html#scoped">here</a> for a description of IPv6
* scoped addresses.
*
@@ -292,13 +297,14 @@
* @param addr the raw IP address in network byte order
* @param scope_id the numeric scope_id for the address.
* @return an Inet6Address object created from the raw IP address.
- * @exception UnknownHostException if IP address is of illegal length.
+ * @throws UnknownHostException if IP address is of illegal length.
*
* @since 1.5
*/
-
- public static Inet6Address getByAddress(String host, byte[] addr, int scope_id)
- throws UnknownHostException {
+ public static Inet6Address getByAddress(String host, byte[] addr,
+ int scope_id)
+ throws UnknownHostException
+ {
if (host != null && host.length() > 0 && host.charAt(0) == '[') {
if (host.charAt(host.length()-1) == ']') {
host = host.substring(1, host.length() -1);
@@ -312,7 +318,9 @@
throw new UnknownHostException("addr is of illegal length");
}
- private void initstr (String hostName, byte addr[], String ifname) throws UnknownHostException {
+ private void initstr(String hostName, byte addr[], String ifname)
+ throws UnknownHostException
+ {
try {
NetworkInterface nif = NetworkInterface.getByName (ifname);
if (nif == null) {
@@ -324,16 +332,17 @@
}
}
- private void initif(String hostName, byte addr[],NetworkInterface nif) throws UnknownHostException {
+ private void initif(String hostName, byte addr[],NetworkInterface nif)
+ throws UnknownHostException
+ {
this.hostName = hostName;
if (addr.length == INADDRSZ) { // normal IPv6 address
family = IPv6;
ipaddress = addr.clone();
}
if (nif != null) {
- this.scope_ifname = nif;
- scope_ifname_set = true;
- scope_id = deriveNumericScope (nif);
+ scope_ifname = nif;
+ scope_id = deriveNumericScope(nif);
scope_id_set = true;
}
}
@@ -344,17 +353,16 @@
* return true otherwise.
*/
private boolean differentLocalAddressTypes(Inet6Address other) {
-
- if (isLinkLocalAddress() && !other.isLinkLocalAddress()) {
+ if (isLinkLocalAddress() && !other.isLinkLocalAddress())
return false;
- }
- if (isSiteLocalAddress() && !other.isSiteLocalAddress()) {
+ if (isSiteLocalAddress() && !other.isSiteLocalAddress())
return false;
- }
return true;
}
- private int deriveNumericScope (NetworkInterface ifc) throws UnknownHostException {
+ private int deriveNumericScope(NetworkInterface ifc)
+ throws UnknownHostException
+ {
Enumeration<InetAddress> addresses = ifc.getInetAddresses();
while (addresses.hasMoreElements()) {
InetAddress addr = addresses.nextElement();
@@ -373,16 +381,17 @@
throw new UnknownHostException ("no scope_id found");
}
- private int deriveNumericScope (String ifname) throws UnknownHostException {
+ private int deriveNumericScope(String ifname) throws UnknownHostException {
Enumeration<NetworkInterface> en;
try {
en = NetworkInterface.getNetworkInterfaces();
} catch (SocketException e) {
- throw new UnknownHostException ("could not enumerate local network interfaces");
+ throw new UnknownHostException(
+ "could not enumerate local network interfaces");
}
while (en.hasMoreElements()) {
NetworkInterface ifc = en.nextElement();
- if (ifc.getName().equals (ifname)) {
+ if (ifc.getName().equals(ifname)) {
Enumeration<InetAddress> addresses = ifc.getInetAddresses();
while (addresses.hasMoreElements()) {
InetAddress addr = addresses.nextElement();
@@ -400,7 +409,8 @@
}
}
}
- throw new UnknownHostException ("No matching address found for interface : " +ifname);
+ throw new UnknownHostException(
+ "No matching address found for interface : " +ifname);
}
/**
@@ -410,22 +420,14 @@
*/
private void readObject(ObjectInputStream s)
throws IOException, ClassNotFoundException {
- scope_ifname = null;
- scope_ifname_set = false;
s.defaultReadObject();
- if (ifname != null && !"".equals (ifname)) {
+ if (ifname != null && !ifname.equals("")) {
try {
scope_ifname = NetworkInterface.getByName(ifname);
- if (scope_ifname == null) {
- /* the interface does not exist on this system, so we clear
- * the scope information completely */
- scope_id_set = false;
- scope_ifname_set = false;
- scope_id = 0;
- } else {
+ if (scope_ifname != null) {
try {
- scope_id = deriveNumericScope (scope_ifname);
+ scope_id = deriveNumericScope(scope_ifname);
} catch (UnknownHostException e) {
// typically should not happen, but it may be that
// the machine being used for deserialization has
@@ -455,8 +457,9 @@
* address. 11111111 at the start of the address identifies the
* address as being a multicast address.
*
- * @return a <code>boolean</code> indicating if the InetAddress is
- * an IP multicast address
+ * @return a {@code boolean} indicating if the InetAddress is an IP
+ * multicast address
+ *
* @since JDK1.1
*/
@Override
@@ -466,8 +469,10 @@
/**
* Utility routine to check if the InetAddress in a wildcard address.
- * @return a <code>boolean</code> indicating if the Inetaddress is
+ *
+ * @return a {@code boolean} indicating if the Inetaddress is
* a wildcard address.
+ *
* @since 1.4
*/
@Override
@@ -482,8 +487,9 @@
/**
* Utility routine to check if the InetAddress is a loopback address.
*
- * @return a <code>boolean</code> indicating if the InetAddress is
- * a loopback address; or false otherwise.
+ * @return a {@code boolean} indicating if the InetAddress is a loopback
+ * address; or false otherwise.
+ *
* @since 1.4
*/
@Override
@@ -498,8 +504,9 @@
/**
* Utility routine to check if the InetAddress is an link local address.
*
- * @return a <code>boolean</code> indicating if the InetAddress is
- * a link local address; or false if address is not a link local unicast address.
+ * @return a {@code boolean} indicating if the InetAddress is a link local
+ * address; or false if address is not a link local unicast address.
+ *
* @since 1.4
*/
@Override
@@ -511,8 +518,9 @@
/**
* Utility routine to check if the InetAddress is a site local address.
*
- * @return a <code>boolean</code> indicating if the InetAddress is
- * a site local address; or false if address is not a site local unicast address.
+ * @return a {@code boolean} indicating if the InetAddress is a site local
+ * address; or false if address is not a site local unicast address.
+ *
* @since 1.4
*/
@Override
@@ -524,9 +532,10 @@
/**
* Utility routine to check if the multicast address has global scope.
*
- * @return a <code>boolean</code> indicating if the address has
- * is a multicast address of global scope, false if it is not
- * of global scope or it is not a multicast address
+ * @return a {@code boolean} indicating if the address is a multicast
+ * address of global scope, false if it is not of global scope or
+ * it is not a multicast address
+ *
* @since 1.4
*/
@Override
@@ -538,9 +547,10 @@
/**
* Utility routine to check if the multicast address has node scope.
*
- * @return a <code>boolean</code> indicating if the address has
- * is a multicast address of node-local scope, false if it is not
- * of node-local scope or it is not a multicast address
+ * @return a {@code boolean} indicating if the address is a multicast
+ * address of node-local scope, false if it is not of node-local
+ * scope or it is not a multicast address
+ *
* @since 1.4
*/
@Override
@@ -552,9 +562,10 @@
/**
* Utility routine to check if the multicast address has link scope.
*
- * @return a <code>boolean</code> indicating if the address has
- * is a multicast address of link-local scope, false if it is not
- * of link-local scope or it is not a multicast address
+ * @return a {@code boolean} indicating if the address is a multicast
+ * address of link-local scope, false if it is not of link-local
+ * scope or it is not a multicast address
+ *
* @since 1.4
*/
@Override
@@ -566,9 +577,10 @@
/**
* Utility routine to check if the multicast address has site scope.
*
- * @return a <code>boolean</code> indicating if the address has
- * is a multicast address of site-local scope, false if it is not
- * of site-local scope or it is not a multicast address
+ * @return a {@code boolean} indicating if the address is a multicast
+ * address of site-local scope, false if it is not of site-local
+ * scope or it is not a multicast address
+ *
* @since 1.4
*/
@Override
@@ -580,10 +592,10 @@
/**
* Utility routine to check if the multicast address has organization scope.
*
- * @return a <code>boolean</code> indicating if the address has
- * is a multicast address of organization-local scope,
- * false if it is not of organization-local scope
- * or it is not a multicast address
+ * @return a {@code boolean} indicating if the address is a multicast
+ * address of organization-local scope, false if it is not of
+ * organization-local scope or it is not a multicast address
+ *
* @since 1.4
*/
@Override
@@ -593,9 +605,9 @@
}
/**
- * Returns the raw IP address of this <code>InetAddress</code>
- * object. The result is in network byte order: the highest order
- * byte of the address is in <code>getAddress()[0]</code>.
+ * Returns the raw IP address of this {@code InetAddress} object. The result
+ * is in network byte order: the highest order byte of the address is in
+ * {@code getAddress()[0]}.
*
* @return the raw IP address of this object.
*/
@@ -609,9 +621,10 @@
* an interface. If no scoped_id is set, the returned value is zero.
*
* @return the scopeId, or zero if not set.
+ *
* @since 1.5
*/
- public int getScopeId () {
+ public int getScopeId() {
return scope_id;
}
@@ -622,22 +635,23 @@
* @return the scoped interface, or null if not set.
* @since 1.5
*/
- public NetworkInterface getScopedInterface () {
+ public NetworkInterface getScopedInterface() {
return scope_ifname;
}
/**
- * Returns the IP address string in textual presentation. If the instance was created
- * specifying a scope identifier then the scope id is appended to the IP address preceded by
- * a "%" (per-cent) character. This can be either a numeric value or a string, depending on which
- * was used to createthe instance.
+ * Returns the IP address string in textual presentation. If the instance
+ * was created specifying a scope identifier then the scope id is appended
+ * to the IP address preceded by a "%" (per-cent) character. This can be
+ * either a numeric value or a string, depending on which was used to create
+ * the instance.
*
* @return the raw IP address in a string format.
*/
@Override
public String getHostAddress() {
String s = numericToTextFormat(ipaddress);
- if (scope_ifname_set) { /* must check this first */
+ if (scope_ifname != null) { /* must check this first */
s = s + "%" + scope_ifname.getName();
} else if (scope_id_set) {
s = s + "%" + scope_id;
@@ -674,29 +688,27 @@
}
/**
- * Compares this object against the specified object.
- * The result is <code>true</code> if and only if the argument is
- * not <code>null</code> and it represents the same IP address as
- * this object.
- * <p>
- * Two instances of <code>InetAddress</code> represent the same IP
- * address if the length of the byte arrays returned by
- * <code>getAddress</code> is the same for both, and each of the
- * array components is the same for the byte arrays.
+ * Compares this object against the specified object. The result is {@code
+ * true} if and only if the argument is not {@code null} and it represents
+ * the same IP address as this object.
+ *
+ * <p> Two instances of {@code InetAddress} represent the same IP address
+ * if the length of the byte arrays returned by {@code getAddress} is the
+ * same for both, and each of the array components is the same for the byte
+ * arrays.
*
* @param obj the object to compare against.
- * @return <code>true</code> if the objects are the same;
- * <code>false</code> otherwise.
+ *
+ * @return {@code true} if the objects are the same; {@code false} otherwise.
+ *
* @see java.net.InetAddress#getAddress()
*/
@Override
public boolean equals(Object obj) {
- if (obj == null ||
- !(obj instanceof Inet6Address))
+ if (obj == null || !(obj instanceof Inet6Address))
return false;
Inet6Address inetAddr = (Inet6Address)obj;
-
for (int i = 0; i < INADDRSZ; i++) {
if (ipaddress[i] != inetAddr.ipaddress[i])
return false;
@@ -709,8 +721,9 @@
* Utility routine to check if the InetAddress is an
* IPv4 compatible IPv6 address.
*
- * @return a <code>boolean</code> indicating if the InetAddress is
- * an IPv4 compatible IPv6 address; or false if address is IPv4 address.
+ * @return a {@code boolean} indicating if the InetAddress is an IPv4
+ * compatible IPv6 address; or false if address is IPv4 address.
+ *
* @since 1.4
*/
public boolean isIPv4CompatibleAddress() {
@@ -727,6 +740,7 @@
// Utilities
private final static int INT16SZ = 2;
+
/*
* Convert IPv6 binary address into presentation (printable) format.
*
@@ -735,9 +749,8 @@
* textual representation format
* @since 1.4
*/
- static String numericToTextFormat(byte[] src)
- {
- StringBuffer sb = new StringBuffer(39);
+ static String numericToTextFormat(byte[] src) {
+ StringBuilder sb = new StringBuilder(39);
for (int i = 0; i < (INADDRSZ / INT16SZ); i++) {
sb.append(Integer.toHexString(((src[i<<1]<<8) & 0xff00)
| (src[(i<<1)+1] & 0xff)));
@@ -766,9 +779,8 @@
private synchronized void writeObject(java.io.ObjectOutputStream s)
throws IOException
{
- if (scope_ifname_set) {
+ if (scope_ifname != null)
ifname = scope_ifname.getName();
- }
s.defaultWriteObject();
}
}
--- a/jdk/src/share/classes/java/net/URLConnection.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/net/URLConnection.java Mon Dec 17 08:30:06 2012 -0500
@@ -129,15 +129,6 @@
* <a href="http://www.ietf.org/rfc/rfc2616.txt">http://www.ietf.org/rfc/rfc2616.txt</a>
* </pre></blockquote>
*
- * Note about <code>fileNameMap</code>: In versions prior to JDK 1.1.6,
- * field <code>fileNameMap</code> of <code>URLConnection</code> was public.
- * In JDK 1.1.6 and later, <code>fileNameMap</code> is private; accessor
- * and mutator methods {@link #getFileNameMap() getFileNameMap} and
- * {@link #setFileNameMap(java.net.FileNameMap) setFileNameMap} are added
- * to access it. This change is also described on the <a href=
- * "http://java.sun.com/products/jdk/1.2/compatibility.html">
- * Compatibility</a> page.
- *
* Invoking the <tt>close()</tt> methods on the <tt>InputStream</tt> or <tt>OutputStream</tt> of an
* <tt>URLConnection</tt> after a request may free network resources associated with this
* instance, unless particular protocol specifications specify different behaviours
@@ -305,8 +296,7 @@
* Loads filename map (a mimetable) from a data file. It will
* first try to load the user-specific table, defined
* by "content.types.user.table" property. If that fails,
- * it tries to load the default built-in table at
- * lib/content-types.properties under java home.
+ * it tries to load the default built-in table.
*
* @return the FileNameMap
* @since 1.2
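
The filename map mentioned in the deleted note is still reachable through the accessor that replaced the old public field; only the wording about where the built-in table lives was dropped. A minimal sketch:

    import java.net.FileNameMap;
    import java.net.URLConnection;

    public class FileNameMapSketch {
        public static void main(String[] args) {
            FileNameMap map = URLConnection.getFileNameMap();
            // Guesses a MIME type from the file extension, e.g. text/html.
            System.out.println(map.getContentTypeFor("index.html"));
            System.out.println(map.getContentTypeFor("logo.png"));
        }
    }
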
--- a/jdk/src/share/classes/java/net/doc-files/net-properties.html Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/net/doc-files/net-properties.html Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
<!--
- Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -237,6 +237,6 @@
</UL>
<P>Since these 2 properties are part of the security policy, they are
not set by either the -D option or the System.setProperty() API,
-instead they are set in the JRE security policy file <code>lib/security/java.security</code>.</P>
+instead they are set as security properties.</P>
</BODY>
</HTML>
--- a/jdk/src/share/classes/java/security/KeyStore.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/security/KeyStore.java Mon Dec 17 08:30:06 2012 -0500
@@ -695,27 +695,23 @@
}
/**
- * Returns the default keystore type as specified in the Java security
- * properties file, or the string
- * "jks" (acronym for "Java keystore")
+ * Returns the default keystore type as specified by the
+ * {@code keystore.type} security property, or the string
+ * {@literal "jks"} (acronym for {@literal "Java keystore"})
* if no such property exists.
- * The Java security properties file is located in the file named
- * <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
*
* <p>The default keystore type can be used by applications that do not
* want to use a hard-coded keystore type when calling one of the
- * <code>getInstance</code> methods, and want to provide a default keystore
+ * {@code getInstance} methods, and want to provide a default keystore
* type in case a user does not specify its own.
*
* <p>The default keystore type can be changed by setting the value of the
- * "keystore.type" security property (in the Java security properties
- * file) to the desired keystore type.
+ * {@code keystore.type} security property to the desired keystore type.
*
- * @return the default keystore type as specified in the
- * Java security properties file, or the string "jks"
+ * @return the default keystore type as specified by the
+ * {@code keystore.type} security property, or the string {@literal "jks"}
* if no such property exists.
+ * @see java.security.Security security properties
*/
public final static String getDefaultType() {
String kstype;
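
A short sketch of the behaviour the reworded javadoc describes: the default type tracks the keystore.type security property and falls back to "jks":

    import java.security.KeyStore;
    import java.security.KeyStoreException;
    import java.security.Security;

    public class DefaultKeyStoreTypeSketch {
        public static void main(String[] args) throws KeyStoreException {
            System.out.println("keystore.type = " + Security.getProperty("keystore.type"));
            System.out.println("default type  = " + KeyStore.getDefaultType());

            // Applications that do not want to hard-code a keystore type:
            KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
            System.out.println(ks.getType());
        }
    }
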
--- a/jdk/src/share/classes/java/security/Policy.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/security/Policy.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,12 +48,8 @@
* <code>getPolicy</code> installs an instance of the default Policy
* implementation (a default subclass implementation of this abstract class).
* The default Policy implementation can be changed by setting the value
- * of the "policy.provider" security property (in the Java security properties
- * file) to the fully qualified name of the desired Policy subclass
- * implementation. The Java security properties file is located in the
- * file named <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * of the {@code policy.provider} security property to the fully qualified
+ * name of the desired Policy subclass implementation.
*
* <p> Application code can directly subclass Policy to provide a custom
* implementation. In addition, an instance of a Policy object can be
@@ -84,6 +80,7 @@
* @see java.security.Provider
* @see java.security.ProtectionDomain
* @see java.security.Permission
+ * @see java.security.Security security properties
*/
public abstract class Policy {
--- a/jdk/src/share/classes/java/security/Security.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/security/Security.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,10 @@
* <p>This class centralizes all security properties and common security
* methods. One of its primary uses is to manage providers.
*
+ * <p>The default values of security properties are read from an
+ * implementation-specific location, which is typically the properties file
+ * {@code lib/security/java.security} in the Java installation directory.
+ *
* @author Benjamin Renaud
*/
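
With the new paragraph, java.security.Security is the documented home of these settings; reading and writing a security property goes through its static accessors. A minimal sketch (setting a property needs the corresponding SecurityPermission when a security manager is installed; the my.example.property key is made up):

    import java.security.Security;

    public class SecurityPropertySketch {
        public static void main(String[] args) {
            // Defaults come from the implementation-specific java.security file.
            System.out.println(Security.getProperty("securerandom.source"));

            // Overrides only affect the running VM, not the file on disk.
            Security.setProperty("my.example.property", "example-value"); // hypothetical key
            System.out.println(Security.getProperty("my.example.property"));
        }
    }
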
--- a/jdk/src/share/classes/java/security/cert/CertPathBuilder.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/security/cert/CertPathBuilder.java Mon Dec 17 08:30:06 2012 -0500
@@ -281,25 +281,23 @@
}
/**
- * Returns the default <code>CertPathBuilder</code> type as specified in
- * the Java security properties file, or the string "PKIX"
- * if no such property exists. The Java security properties file is
- * located in the file named <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * Returns the default {@code CertPathBuilder} type as specified by
+ * the {@code certpathbuilder.type} security property, or the string
+ * {@literal "PKIX"} if no such property exists.
*
- * <p>The default <code>CertPathBuilder</code> type can be used by
+ * <p>The default {@code CertPathBuilder} type can be used by
* applications that do not want to use a hard-coded type when calling one
- * of the <code>getInstance</code> methods, and want to provide a default
+ * of the {@code getInstance} methods, and want to provide a default
* type in case a user does not specify its own.
*
- * <p>The default <code>CertPathBuilder</code> type can be changed by
- * setting the value of the "certpathbuilder.type" security property
- * (in the Java security properties file) to the desired type.
+ * <p>The default {@code CertPathBuilder} type can be changed by
+ * setting the value of the {@code certpathbuilder.type} security property
+ * to the desired type.
*
- * @return the default <code>CertPathBuilder</code> type as specified
- * in the Java security properties file, or the string "PKIX"
- * if no such property exists.
+ * @see java.security.Security security properties
+ * @return the default {@code CertPathBuilder} type as specified
+ * by the {@code certpathbuilder.type} security property, or the string
+ * {@literal "PKIX"} if no such property exists.
*/
public final static String getDefaultType() {
String cpbtype =
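
The same property-plus-fallback pattern as KeyStore.getDefaultType(), here keyed on certpathbuilder.type, and analogously on certpathvalidator.type and certstore.type in the files below. A minimal sketch:

    import java.security.NoSuchAlgorithmException;
    import java.security.cert.CertPathBuilder;

    public class DefaultCertPathBuilderSketch {
        public static void main(String[] args) throws NoSuchAlgorithmException {
            // "PKIX" unless the certpathbuilder.type security property says otherwise.
            String type = CertPathBuilder.getDefaultType();
            CertPathBuilder cpb = CertPathBuilder.getInstance(type);
            System.out.println(cpb.getAlgorithm());
        }
    }
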
--- a/jdk/src/share/classes/java/security/cert/CertPathValidator.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/security/cert/CertPathValidator.java Mon Dec 17 08:30:06 2012 -0500
@@ -293,25 +293,23 @@
}
/**
- * Returns the default <code>CertPathValidator</code> type as specified in
- * the Java security properties file, or the string "PKIX"
- * if no such property exists. The Java security properties file is
- * located in the file named <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * Returns the default {@code CertPathValidator} type as specified by
+ * the {@code certpathvalidator.type} security property, or the string
+ * {@literal "PKIX"} if no such property exists.
*
- * <p>The default <code>CertPathValidator</code> type can be used by
+ * <p>The default {@code CertPathValidator} type can be used by
* applications that do not want to use a hard-coded type when calling one
- * of the <code>getInstance</code> methods, and want to provide a default
+ * of the {@code getInstance} methods, and want to provide a default
* type in case a user does not specify its own.
*
- * <p>The default <code>CertPathValidator</code> type can be changed by
- * setting the value of the "certpathvalidator.type" security property
- * (in the Java security properties file) to the desired type.
+ * <p>The default {@code CertPathValidator} type can be changed by
+ * setting the value of the {@code certpathvalidator.type} security
+ * property to the desired type.
*
- * @return the default <code>CertPathValidator</code> type as specified
- * in the Java security properties file, or the string "PKIX"
- * if no such property exists.
+ * @see java.security.Security security properties
+ * @return the default {@code CertPathValidator} type as specified
+ * by the {@code certpathvalidator.type} security property, or the string
+ * {@literal "PKIX"} if no such property exists.
*/
public final static String getDefaultType() {
String cpvtype =
--- a/jdk/src/share/classes/java/security/cert/CertStore.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/security/cert/CertStore.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -389,25 +389,23 @@
}
/**
- * Returns the default <code>CertStore</code> type as specified in the
- * Java security properties file, or the string "LDAP" if no
- * such property exists. The Java security properties file is located in
- * the file named <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * Returns the default {@code CertStore} type as specified by the
+ * {@code certstore.type} security property, or the string
+ * {@literal "LDAP"} if no such property exists.
*
- * <p>The default <code>CertStore</code> type can be used by applications
+ * <p>The default {@code CertStore} type can be used by applications
* that do not want to use a hard-coded type when calling one of the
- * <code>getInstance</code> methods, and want to provide a default
- * <code>CertStore</code> type in case a user does not specify its own.
+ * {@code getInstance} methods, and want to provide a default
+ * {@code CertStore} type in case a user does not specify its own.
*
- * <p>The default <code>CertStore</code> type can be changed by setting
- * the value of the "certstore.type" security property (in the Java
- * security properties file) to the desired type.
+ * <p>The default {@code CertStore} type can be changed by setting
+ * the value of the {@code certstore.type} security property to the
+ * desired type.
*
- * @return the default <code>CertStore</code> type as specified in the
- * Java security properties file, or the string "LDAP"
- * if no such property exists.
+ * @see java.security.Security security properties
+ * @return the default {@code CertStore} type as specified by the
+ * {@code certstore.type} security property, or the string
+ * {@literal "LDAP"} if no such property exists.
*/
public final static String getDefaultType() {
String cstype;
--- a/jdk/src/share/classes/java/text/DateFormatSymbols.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/text/DateFormatSymbols.java Mon Dec 17 08:30:06 2012 -0500
@@ -688,7 +688,16 @@
}
ResourceBundle resource = adapter.getLocaleData().getDateFormatData(locale);
- eras = resource.getStringArray("Eras");
+ // JRE and CLDR use different keys
+ // JRE: Eras, short.Eras and narrow.Eras
+ // CLDR: long.Eras, Eras and narrow.Eras
+ if (resource.containsKey("Eras")) {
+ eras = resource.getStringArray("Eras");
+ } else if (resource.containsKey("long.Eras")) {
+ eras = resource.getStringArray("long.Eras");
+ } else if (resource.containsKey("short.Eras")) {
+ eras = resource.getStringArray("short.Eras");
+ }
months = resource.getStringArray("MonthNames");
shortMonths = resource.getStringArray("MonthAbbreviations");
ampms = resource.getStringArray("AmPmMarkers");
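
The era lookup now tolerates the different resource-bundle keys used by the JRE and CLDR locale data. From application code the difference is invisible; the eras still come back through DateFormatSymbols. A minimal sketch:

    import java.text.DateFormatSymbols;
    import java.util.Arrays;
    import java.util.Locale;

    public class ErasSketch {
        public static void main(String[] args) {
            DateFormatSymbols symbols = DateFormatSymbols.getInstance(Locale.US);
            // Typically [BC, AD] for the Gregorian calendar in an English locale.
            System.out.println(Arrays.toString(symbols.getEras()));
        }
    }
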
--- a/jdk/src/share/classes/java/text/SimpleDateFormat.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/text/SimpleDateFormat.java Mon Dec 17 08:30:06 2012 -0500
@@ -48,12 +48,13 @@
import java.util.Locale;
import java.util.Map;
import java.util.SimpleTimeZone;
+import java.util.SortedMap;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import sun.util.locale.provider.LocaleProviderAdapter;
import sun.util.calendar.CalendarUtils;
import sun.util.calendar.ZoneInfoFile;
+import sun.util.locale.provider.LocaleProviderAdapter;
/**
* <code>SimpleDateFormat</code> is a concrete class for formatting and
@@ -1593,6 +1594,17 @@
private int matchString(String text, int start, int field,
Map<String,Integer> data, CalendarBuilder calb) {
if (data != null) {
+ // TODO: make this default when it's in the spec.
+ if (data instanceof SortedMap) {
+ for (String name : data.keySet()) {
+ if (text.regionMatches(true, start, name, 0, name.length())) {
+ calb.set(field, data.get(name));
+ return start + name.length();
+ }
+ }
+ return -start;
+ }
+
String bestMatch = null;
for (String name : data.keySet()) {
@@ -1803,7 +1815,7 @@
boolean obeyCount, boolean[] ambiguousYear,
ParsePosition origPos,
boolean useFollowingMinusSignAsDelimiter, CalendarBuilder calb) {
- Number number = null;
+ Number number;
int value = 0;
ParsePosition pos = new ParsePosition(0);
pos.index = start;
@@ -1876,9 +1888,7 @@
return index;
}
} else {
- Map<String, Integer> map = calendar.getDisplayNames(field,
- Calendar.ALL_STYLES,
- locale);
+ Map<String, Integer> map = getDisplayNamesMap(field, locale);
if ((index = matchString(text, start, field, map, calb)) > 0) {
return index;
}
@@ -1940,7 +1950,7 @@
// count >= 3 // i.e., MMM or MMMM
// Want to be able to parse both short and long forms.
// Try count == 4 first:
- int newStart = 0;
+ int newStart;
if ((newStart = matchString(text, start, Calendar.MONTH,
formatData.getMonths(), calb)) > 0) {
return newStart;
@@ -1951,9 +1961,7 @@
return index;
}
} else {
- Map<String, Integer> map = calendar.getDisplayNames(field,
- Calendar.ALL_STYLES,
- locale);
+ Map<String, Integer> map = getDisplayNamesMap(field, locale);
if ((index = matchString(text, start, field, map, calb)) > 0) {
return index;
}
@@ -1979,7 +1987,7 @@
if (useDateFormatSymbols) {
// Want to be able to parse both short and long forms.
// Try count == 4 (DDDD) first:
- int newStart = 0;
+ int newStart;
if ((newStart=matchString(text, start, Calendar.DAY_OF_WEEK,
formatData.getWeekdays(), calb)) > 0) {
return newStart;
@@ -2008,7 +2016,7 @@
return index;
}
} else {
- Map<String,Integer> map = calendar.getDisplayNames(field, Calendar.ALL_STYLES, locale);
+ Map<String,Integer> map = getDisplayNamesMap(field, locale);
if ((index = matchString(text, start, field, map, calb)) > 0) {
return index;
}
@@ -2098,7 +2106,7 @@
break parsing;
}
- int sign = 0;
+ int sign;
char c = text.charAt(pos.index);
if (c == 'Z') {
calb.set(Calendar.ZONE_OFFSET, 0).set(Calendar.DST_OFFSET, 0);
@@ -2340,6 +2348,21 @@
&& formatData.equals(that.formatData));
}
+ private static final int[] REST_OF_STYLES = {
+ Calendar.SHORT_STANDALONE, Calendar.LONG_FORMAT, Calendar.LONG_STANDALONE,
+ };
+ private Map<String, Integer> getDisplayNamesMap(int field, Locale locale) {
+ Map<String, Integer> map = calendar.getDisplayNames(field, Calendar.SHORT_FORMAT, locale);
+ // Get all SHORT and LONG styles (avoid NARROW styles).
+ for (int style : REST_OF_STYLES) {
+ Map<String, Integer> m = calendar.getDisplayNames(field, style, locale);
+ if (m != null) {
+ map.putAll(m);
+ }
+ }
+ return map;
+ }
+
/**
* After reading an object from the input stream, the format
* pattern in the object is verified.
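
getDisplayNamesMap above merges the SHORT and LONG style maps (and deliberately skips the new NARROW styles) rather than asking for ALL_STYLES. The public Calendar API exposes the same data it draws from; a sketch for an English locale:

    import java.util.Calendar;
    import java.util.Locale;
    import java.util.Map;

    public class DisplayNamesSketch {
        public static void main(String[] args) {
            Calendar cal = Calendar.getInstance(Locale.US);
            Map<String, Integer> shortNames =
                    cal.getDisplayNames(Calendar.MONTH, Calendar.SHORT_FORMAT, Locale.US);
            Map<String, Integer> longNames =
                    cal.getDisplayNames(Calendar.MONTH, Calendar.LONG_FORMAT, Locale.US);
            // SimpleDateFormat's parser works from the union of these maps,
            // e.g. both "Jan" and "January" resolve to Calendar.JANUARY (0).
            System.out.println(shortNames.get("Jan") + " / " + longNames.get("January"));
        }
    }
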
--- a/jdk/src/share/classes/java/util/Base64.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/Base64.java Mon Dec 17 08:30:06 2012 -0500
@@ -289,8 +289,8 @@
*
* <p> This method first encodes all input bytes into a base64 encoded
* byte array and then constructs a new String by using the encoded byte
- * array and the {@link java.nio.charset.StandardCharsets.ISO_8859_1 ISO-8859-1}
- * charset.
+ * array and the {@link java.nio.charset.StandardCharsets#ISO_8859_1
+ * ISO-8859-1} charset.
*
* <p> In other words, an invocation of this method has exactly the same
* effect as invoking
@@ -358,9 +358,9 @@
* to encode any more input bytes. The encoding operation can be
* continued, if there is more bytes in input buffer to be encoded,
* by invoking this method again with an output buffer that has more
- * {@linkplain Buffer#remaining remaining} bytes. This is typically
- * done by draining any encoded bytes from the output buffer. The
- * value returned from last invocation needs to be passed in as the
+ * {@linkplain java.nio.Buffer#remaining remaining} bytes. This is
+ * typically done by draining any encoded bytes from the output buffer.
+ * The value returned from last invocation needs to be passed in as the
* third parameter {@code bytesOut} if it is to continue an unfinished
* encoding, 0 otherwise.
*
@@ -806,9 +806,9 @@
* buffer has insufficient space to decode any more input bytes.
* The decoding operation can be continued, if there is more bytes
* in input buffer to be decoded, by invoking this method again with
- * an output buffer that has more {@linkplain Buffer#remaining remaining}
- * bytes.This is typically done by draining any decoded bytes from the
- * output buffer.
+ * an output buffer that has more {@linkplain java.nio.Buffer#remaining
+ * remaining} bytes. This is typically done by draining any decoded
+ * bytes from the output buffer.
*
* <p><b>Recommended Usage Example</b>
* <pre>
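
The corrected links point at the charset used by the String convenience methods and at java.nio.Buffer for the streaming overloads. A round-trip sketch using the byte[]/String forms (assuming the getEncoder/getDecoder factories of java.util.Base64, which may still be in flux at this stage):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class Base64RoundTripSketch {
        public static void main(String[] args) {
            byte[] input = "hello, world".getBytes(StandardCharsets.UTF_8);

            // encodeToString builds the String from the ISO-8859-1 interpretation
            // of the encoded bytes, as described in the javadoc above.
            String encoded = Base64.getEncoder().encodeToString(input);
            byte[] decoded = Base64.getDecoder().decode(encoded);

            System.out.println(encoded);                                      // aGVsbG8sIHdvcmxk
            System.out.println(new String(decoded, StandardCharsets.UTF_8));  // hello, world
        }
    }
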
--- a/jdk/src/share/classes/java/util/Calendar.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/Calendar.java Mon Dec 17 08:30:06 2012 -0500
@@ -53,9 +53,7 @@
import java.text.DateFormatSymbols;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import java.util.spi.CalendarDataProvider;
import sun.util.BuddhistCalendar;
-import sun.util.locale.provider.LocaleProviderAdapter;
import sun.util.calendar.ZoneInfo;
import sun.util.locale.provider.CalendarDataUtility;
@@ -746,6 +744,32 @@
/**
* A style specifier for {@link #getDisplayName(int, int, Locale)
* getDisplayName} and {@link #getDisplayNames(int, int, Locale)
+ * getDisplayNames} indicating a narrow name used for format. Narrow names
+ * are typically single character strings, such as "M" for Monday.
+ *
+ * @see #NARROW_STANDALONE
+ * @see #SHORT_FORMAT
+ * @see #LONG_FORMAT
+ * @since 1.8
+ */
+ public static final int NARROW_FORMAT = 4;
+
+ /**
+ * A style specifier for {@link #getDisplayName(int, int, Locale)
+ * getDisplayName} and {@link #getDisplayNames(int, int, Locale)
+ * getDisplayNames} indicating a narrow name independently. Narrow names
+ * are typically single character strings, such as "M" for Monday.
+ *
+ * @see #NARROW_FORMAT
+ * @see #SHORT_STANDALONE
+ * @see #LONG_STANDALONE
+ * @since 1.8
+ */
+ public static final int NARROW_STANDALONE = NARROW_FORMAT | STANDALONE_MASK;
+
+ /**
+ * A style specifier for {@link #getDisplayName(int, int, Locale)
+ * getDisplayName} and {@link #getDisplayNames(int, int, Locale)
* getDisplayNames} indicating a short name used for format.
*
* @see #SHORT_STANDALONE
@@ -1472,30 +1496,31 @@
* @param style
* the style applied to the string representation; one of {@link
* #SHORT_FORMAT} ({@link #SHORT}), {@link #SHORT_STANDALONE},
- * {@link #LONG_FORMAT} ({@link #LONG}) or {@link #LONG_STANDALONE}.
+ * {@link #LONG_FORMAT} ({@link #LONG}), {@link #LONG_STANDALONE},
+ * {@link #NARROW_FORMAT}, or {@link #NARROW_STANDALONE}.
* @param locale
* the locale for the string representation
* (any calendar types specified by {@code locale} are ignored)
* @return the string representation of the given
- * <code>field</code> in the given <code>style</code>, or
- * <code>null</code> if no string representation is
+ * {@code field} in the given {@code style}, or
+ * {@code null} if no string representation is
* applicable.
* @exception IllegalArgumentException
- * if <code>field</code> or <code>style</code> is invalid,
- * or if this <code>Calendar</code> is non-lenient and any
+ * if {@code field} or {@code style} is invalid,
+ * or if this {@code Calendar} is non-lenient and any
* of the calendar fields have invalid values
* @exception NullPointerException
- * if <code>locale</code> is null
+ * if {@code locale} is null
* @since 1.6
*/
public String getDisplayName(int field, int style, Locale locale) {
- if (!checkDisplayNameParams(field, style, SHORT, LONG, locale,
+ if (!checkDisplayNameParams(field, style, SHORT, NARROW_FORMAT, locale,
ERA_MASK|MONTH_MASK|DAY_OF_WEEK_MASK|AM_PM_MASK)) {
return null;
}
- // the standalone styles are supported only through CalendarDataProviders.
- if (isStandaloneStyle(style)) {
+ // the standalone and narrow styles are supported only through CalendarDataProviders.
+ if (isStandaloneStyle(style) || isNarrowStyle(style)) {
return CalendarDataUtility.retrieveFieldValueName(getCalendarType(),
field, get(field),
style, locale);
@@ -1513,26 +1538,30 @@
}
/**
- * Returns a <code>Map</code> containing all names of the calendar
- * <code>field</code> in the given <code>style</code> and
- * <code>locale</code> and their corresponding field values. For
- * example, if this <code>Calendar</code> is a {@link
+ * Returns a {@code Map} containing all names of the calendar
+ * {@code field} in the given {@code style} and
+ * {@code locale} and their corresponding field values. For
+ * example, if this {@code Calendar} is a {@link
* GregorianCalendar}, the returned map would contain "Jan" to
* {@link #JANUARY}, "Feb" to {@link #FEBRUARY}, and so on, in the
* {@linkplain #SHORT short} style in an English locale.
*
+ * <p>Narrow names may not be unique due to use of single characters,
+ * such as "S" for Sunday and Saturday. In that case narrow names are not
+ * included in the returned {@code Map}.
+ *
* <p>The values of other calendar fields may be taken into
* account to determine a set of display names. For example, if
- * this <code>Calendar</code> is a lunisolar calendar system and
+ * this {@code Calendar} is a lunisolar calendar system and
* the year value given by the {@link #YEAR} field has a leap
* month, this method would return month names containing the leap
* month name, and month names are mapped to their values specific
* for the year.
*
* <p>The default implementation supports display names contained in
- * a {@link DateFormatSymbols}. For example, if <code>field</code>
- * is {@link #MONTH} and <code>style</code> is {@link
- * #ALL_STYLES}, this method returns a <code>Map</code> containing
+ * a {@link DateFormatSymbols}. For example, if {@code field}
+ * is {@link #MONTH} and {@code style} is {@link
+ * #ALL_STYLES}, this method returns a {@code Map} containing
* all strings returned by {@link DateFormatSymbols#getShortMonths()}
* and {@link DateFormatSymbols#getMonths()}.
*
@@ -1541,30 +1570,31 @@
* @param style
* the style applied to the string representation; one of {@link
* #SHORT_FORMAT} ({@link #SHORT}), {@link #SHORT_STANDALONE},
- * {@link #LONG_FORMAT} ({@link #LONG}) or {@link #LONG_STANDALONE}.
+ * {@link #LONG_FORMAT} ({@link #LONG}), {@link #LONG_STANDALONE},
+ * {@link #NARROW_FORMAT}, or {@link #NARROW_STANDALONE}
* @param locale
* the locale for the display names
- * @return a <code>Map</code> containing all display names in
- * <code>style</code> and <code>locale</code> and their
- * field values, or <code>null</code> if no display names
- * are defined for <code>field</code>
+ * @return a {@code Map} containing all display names in
+ * {@code style} and {@code locale} and their
+ * field values, or {@code null} if no display names
+ * are defined for {@code field}
* @exception IllegalArgumentException
- * if <code>field</code> or <code>style</code> is invalid,
- * or if this <code>Calendar</code> is non-lenient and any
+ * if {@code field} or {@code style} is invalid,
+ * or if this {@code Calendar} is non-lenient and any
* of the calendar fields have invalid values
* @exception NullPointerException
- * if <code>locale</code> is null
+ * if {@code locale} is null
* @since 1.6
*/
public Map<String, Integer> getDisplayNames(int field, int style, Locale locale) {
- if (!checkDisplayNameParams(field, style, ALL_STYLES, LONG, locale,
+ if (!checkDisplayNameParams(field, style, ALL_STYLES, NARROW_FORMAT, locale,
ERA_MASK|MONTH_MASK|DAY_OF_WEEK_MASK|AM_PM_MASK)) {
return null;
}
if (style == ALL_STYLES || isStandaloneStyle(style)) {
return CalendarDataUtility.retrieveFieldValueNames(getCalendarType(), field, style, locale);
}
- // SHORT or LONG
+ // SHORT, LONG, or NARROW
return getDisplayNamesImpl(field, style, locale);
}
@@ -1599,6 +1629,12 @@
private String[] getFieldStrings(int field, int style, DateFormatSymbols symbols) {
int baseStyle = getBaseStyle(style); // ignore the standalone mask
+
+ // DateFormatSymbols doesn't support any narrow names.
+ if (baseStyle == NARROW_FORMAT) {
+ return null;
+ }
+
String[] strings = null;
switch (field) {
case ERA:
@@ -1948,6 +1984,10 @@
return (style & STANDALONE_MASK) != 0;
}
+ boolean isNarrowStyle(int style) {
+ return style == NARROW_FORMAT || style == NARROW_STANDALONE;
+ }
+
/**
* Returns the pseudo-time-stamp for two fields, given their
* individual pseudo-time-stamps. If either of the fields
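
The new NARROW_FORMAT and NARROW_STANDALONE constants are used like the existing style specifiers; a usage sketch (field and locale picked only for illustration):

import java.util.Calendar;
import java.util.Locale;
import java.util.Map;

public class NarrowStyleSketch {
    public static void main(String[] args) {
        Calendar cal = Calendar.getInstance(Locale.ENGLISH);
        // Narrow names are typically single characters, e.g. "M" for Monday.
        String narrow = cal.getDisplayName(Calendar.DAY_OF_WEEK,
                                           Calendar.NARROW_FORMAT, Locale.ENGLISH);
        // ALL_STYLES merges the available names; narrow names are dropped when
        // they would not be unique (e.g. "S" for Saturday and Sunday).
        Map<String, Integer> names =
                cal.getDisplayNames(Calendar.DAY_OF_WEEK, Calendar.ALL_STYLES, Locale.ENGLISH);
        System.out.println(narrow);
        System.out.println(names);
    }
}
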
--- a/jdk/src/share/classes/java/util/JapaneseImperialCalendar.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/JapaneseImperialCalendar.java Mon Dec 17 08:30:06 2012 -0500
@@ -946,8 +946,9 @@
set(field, getRolledValue(internalGet(field), amount, min, max));
}
+ @Override
public String getDisplayName(int field, int style, Locale locale) {
- if (!checkDisplayNameParams(field, style, SHORT, LONG, locale,
+ if (!checkDisplayNameParams(field, style, SHORT, NARROW_FORMAT, locale,
ERA_MASK|YEAR_MASK|MONTH_MASK|DAY_OF_WEEK_MASK|AM_PM_MASK)) {
return null;
}
@@ -956,11 +957,12 @@
// "GanNen" is supported only in the LONG style.
if (field == YEAR
- && (getBaseStyle(style) == SHORT || fieldValue != 1 || get(ERA) == 0)) {
+ && (getBaseStyle(style) != LONG || fieldValue != 1 || get(ERA) == 0)) {
return null;
}
- String name = CalendarDataUtility.retrieveFieldValueName("japanese", field, fieldValue, style, locale);
+ String name = CalendarDataUtility.retrieveFieldValueName(getCalendarType(), field,
+ fieldValue, style, locale);
// If the ERA value is null, then
// try to get its name or abbreviation from the Era instance.
if (name == null && field == ERA && fieldValue < eras.length) {
@@ -970,27 +972,37 @@
return name;
}
+ @Override
public Map<String,Integer> getDisplayNames(int field, int style, Locale locale) {
- if (!checkDisplayNameParams(field, style, ALL_STYLES, LONG, locale,
+ if (!checkDisplayNameParams(field, style, ALL_STYLES, NARROW_FORMAT, locale,
ERA_MASK|YEAR_MASK|MONTH_MASK|DAY_OF_WEEK_MASK|AM_PM_MASK)) {
return null;
}
- Map<String, Integer> names = CalendarDataUtility.retrieveFieldValueNames("japanese", field, style, locale);
+ Map<String, Integer> names;
+ names = CalendarDataUtility.retrieveFieldValueNames(getCalendarType(), field, style, locale);
// If strings[] has fewer than eras[], get more names from eras[].
- if (field == ERA) {
- int size = names.size();
- if (style == ALL_STYLES) {
- size /= 2; // SHORT and LONG
- }
- if (size < eras.length) {
- int baseStyle = getBaseStyle(style);
- for (int i = size; i < eras.length; i++) {
- Era era = eras[i];
- if (baseStyle == ALL_STYLES || baseStyle == SHORT) {
- names.put(era.getAbbreviation(), i);
+ if (names != null) {
+ if (field == ERA) {
+ int size = names.size();
+ if (style == ALL_STYLES) {
+ Set<Integer> values = new HashSet<>();
+ // count unique era values
+ for (String key : names.keySet()) {
+ values.add(names.get(key));
}
- if (baseStyle == ALL_STYLES || baseStyle == LONG) {
- names.put(era.getName(), i);
+ size = values.size();
+ }
+ if (size < eras.length) {
+ int baseStyle = getBaseStyle(style);
+ for (int i = size; i < eras.length; i++) {
+ Era era = eras[i];
+ if (baseStyle == ALL_STYLES || baseStyle == SHORT
+ || baseStyle == NARROW_FORMAT) {
+ names.put(era.getAbbreviation(), i);
+ }
+ if (baseStyle == ALL_STYLES || baseStyle == LONG) {
+ names.put(era.getName(), i);
+ }
}
}
}
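
The era handling above can be observed through the public API; a sketch assuming the Japanese calendar locale extension (ja-JP-u-ca-japanese) and treating the printed values only as examples:

import java.util.Calendar;
import java.util.Locale;
import java.util.Map;

public class JapaneseEraSketch {
    public static void main(String[] args) {
        Locale japanese = Locale.forLanguageTag("ja-JP-u-ca-japanese");
        Calendar cal = Calendar.getInstance(japanese);
        // Era names come from the locale data first; any eras missing there
        // are filled in from the calendar's own Era table, as in the hunk above.
        Map<String, Integer> eras =
                cal.getDisplayNames(Calendar.ERA, Calendar.ALL_STYLES, Locale.ENGLISH);
        System.out.println(cal.getCalendarType()); // japanese
        System.out.println(eras);                  // e.g. {Heisei=4, Showa=3, ...}
    }
}
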
--- a/jdk/src/share/classes/java/util/TimeZone.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/TimeZone.java Mon Dec 17 08:30:06 2012 -0500
@@ -43,12 +43,12 @@
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.concurrent.ConcurrentHashMap;
+import sun.misc.JavaAWTAccess;
import sun.misc.SharedSecrets;
-import sun.misc.JavaAWTAccess;
import sun.security.action.GetPropertyAction;
-import sun.util.locale.provider.TimeZoneNameUtility;
import sun.util.calendar.ZoneInfo;
import sun.util.calendar.ZoneInfoFile;
+import sun.util.locale.provider.TimeZoneNameUtility;
/**
* <code>TimeZone</code> represents a time zone offset, and also figures out daylight
@@ -399,28 +399,23 @@
if (style != SHORT && style != LONG) {
throw new IllegalArgumentException("Illegal style: " + style);
}
-
String id = getID();
- String[] names = getDisplayNames(id, locale);
- if (names == null) {
- if (id.startsWith("GMT") && id.length() > 3) {
- char sign = id.charAt(3);
- if (sign == '+' || sign == '-') {
- return id;
- }
- }
- int offset = getRawOffset();
- if (daylight) {
- offset += getDSTSavings();
- }
- return ZoneInfoFile.toCustomID(offset);
+ String name = TimeZoneNameUtility.retrieveDisplayName(id, daylight, style, locale);
+ if (name != null) {
+ return name;
}
- int index = daylight ? 3 : 1;
- if (style == SHORT) {
- index++;
+ if (id.startsWith("GMT") && id.length() > 3) {
+ char sign = id.charAt(3);
+ if (sign == '+' || sign == '-') {
+ return id;
+ }
}
- return names[index];
+ int offset = getRawOffset();
+ if (daylight) {
+ offset += getDSTSavings();
+ }
+ return ZoneInfoFile.toCustomID(offset);
}
private static class DisplayNames {
@@ -429,9 +424,12 @@
// Map(key=id, value=SoftReference(Map(key=locale, value=displaynames)))
private static final Map<String, SoftReference<Map<Locale, String[]>>> CACHE =
new ConcurrentHashMap<>();
+
+ private DisplayNames() {
+ }
}
- private static final String[] getDisplayNames(String id, Locale locale) {
+ private static String[] getDisplayNames(String id, Locale locale) {
Map<String, SoftReference<Map<Locale, String[]>>> displayNames = DisplayNames.CACHE;
SoftReference<Map<Locale, String[]>> ref = displayNames.get(id);
@@ -631,14 +629,14 @@
}
private static synchronized TimeZone setDefaultZone() {
- TimeZone tz = null;
+ TimeZone tz;
// get the time zone ID from the system properties
String zoneID = AccessController.doPrivileged(
new GetPropertyAction("user.timezone"));
// if the time zone ID is not set (yet), perform the
// platform to Java time zone ID mapping.
- if (zoneID == null || zoneID.equals("")) {
+ if (zoneID == null || zoneID.isEmpty()) {
String country = AccessController.doPrivileged(
new GetPropertyAction("user.country"));
String javaHome = AccessController.doPrivileged(
@@ -670,8 +668,9 @@
assert tz != null;
final String id = zoneID;
- AccessController.doPrivileged(new PrivilegedAction<Object>() {
- public Object run() {
+ AccessController.doPrivileged(new PrivilegedAction<Void>() {
+ @Override
+ public Void run() {
System.setProperty("user.timezone", id);
return null;
}
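
From the caller's side the rewritten getDisplayName path is unchanged: it first asks TimeZoneNameUtility and then falls back to a normalized GMT-offset ID. A small sketch of the public entry point (zone and locale chosen arbitrarily):

import java.util.Locale;
import java.util.TimeZone;

public class TimeZoneNameSketch {
    public static void main(String[] args) {
        TimeZone la = TimeZone.getTimeZone("America/Los_Angeles");
        // Standard-time and daylight-saving names, long and short styles.
        System.out.println(la.getDisplayName(false, TimeZone.LONG, Locale.US)); // Pacific Standard Time
        System.out.println(la.getDisplayName(true, TimeZone.SHORT, Locale.US)); // PDT
        // A custom GMT offset zone falls back to its custom ID.
        TimeZone custom = TimeZone.getTimeZone("GMT+05:30");
        System.out.println(custom.getDisplayName(false, TimeZone.SHORT, Locale.US)); // GMT+05:30
    }
}
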
--- a/jdk/src/share/classes/java/util/logging/LogManager.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/logging/LogManager.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,10 +31,10 @@
import java.security.*;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
import java.beans.PropertyChangeListener;
-import java.beans.PropertyChangeEvent;
-import java.net.URL;
-import sun.security.action.GetPropertyAction;
/**
* There is a single global LogManager object that is used to
@@ -150,7 +150,7 @@
// The map of the registered listeners. The map value is the registration
// count to allow for cases where the same listener is registered many times.
- private final Map<PropertyChangeListener,Integer> listenerMap = new HashMap<>();
+ private final Map<Object,Integer> listenerMap = new HashMap<>();
// Table of named Loggers that maps names to Loggers.
private Hashtable<String,LoggerWeakRef> namedLoggers = new Hashtable<>();
@@ -243,7 +243,7 @@
* Protected constructor. This is protected so that container applications
* (such as J2EE containers) can subclass the object. It is non-public as
* it is intended that there only be one LogManager object, whose value is
- * retrieved by calling Logmanager.getLogManager.
+ * retrieved by calling LogManager.getLogManager.
*/
protected LogManager() {
// Add a shutdown hook to close the global handlers.
@@ -971,22 +971,24 @@
// Notify any interested parties that our properties have changed.
// We first take a copy of the listener map so that we aren't holding any
// locks when calling the listeners.
- Map<PropertyChangeListener,Integer> listeners = null;
+ Map<Object,Integer> listeners = null;
synchronized (listenerMap) {
if (!listenerMap.isEmpty())
listeners = new HashMap<>(listenerMap);
}
if (listeners != null) {
- PropertyChangeEvent ev = new PropertyChangeEvent(LogManager.class, null, null, null);
- for (Map.Entry<PropertyChangeListener,Integer> entry : listeners.entrySet()) {
- PropertyChangeListener listener = entry.getKey();
+ assert Beans.isBeansPresent();
+ Object ev = Beans.newPropertyChangeEvent(LogManager.class, null, null, null);
+ for (Map.Entry<Object,Integer> entry : listeners.entrySet()) {
+ Object listener = entry.getKey();
int count = entry.getValue().intValue();
for (int i = 0; i < count; i++) {
- listener.propertyChange(ev);
+ Beans.invokePropertyChange(listener, ev);
}
}
}
+
// Note that we need to reinitialize global handles when
// they are first referenced.
synchronized (this) {
@@ -1269,4 +1271,100 @@
return loggingMXBean;
}
+ /**
+ * A class that provides access to the java.beans.PropertyChangeListener
+ * and java.beans.PropertyChangeEvent without creating a static dependency
+ * on java.beans. This class can be removed once the addPropertyChangeListener
+ * and removePropertyChangeListener methods are removed.
+ */
+ private static class Beans {
+ private static final Class<?> propertyChangeListenerClass =
+ getClass("java.beans.PropertyChangeListener");
+
+ private static final Class<?> propertyChangeEventClass =
+ getClass("java.beans.PropertyChangeEvent");
+
+ private static final Method propertyChangeMethod =
+ getMethod(propertyChangeListenerClass,
+ "propertyChange",
+ propertyChangeEventClass);
+
+ private static final Constructor<?> propertyEventCtor =
+ getConstructor(propertyChangeEventClass,
+ Object.class,
+ String.class,
+ Object.class,
+ Object.class);
+
+ private static Class<?> getClass(String name) {
+ try {
+ return Class.forName(name, true, Beans.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ return null;
+ }
+ }
+ private static Constructor<?> getConstructor(Class<?> c, Class<?>... types) {
+ try {
+ return (c == null) ? null : c.getDeclaredConstructor(types);
+ } catch (NoSuchMethodException x) {
+ throw new AssertionError(x);
+ }
+ }
+
+ private static Method getMethod(Class<?> c, String name, Class<?>... types) {
+ try {
+ return (c == null) ? null : c.getMethod(name, types);
+ } catch (NoSuchMethodException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ /**
+ * Returns {@code true} if java.beans is present.
+ */
+ static boolean isBeansPresent() {
+ return propertyChangeListenerClass != null &&
+ propertyChangeEventClass != null;
+ }
+
+ /**
+ * Returns a new PropertyChangeEvent with the given source, property
+ * name, old and new values.
+ */
+ static Object newPropertyChangeEvent(Object source, String prop,
+ Object oldValue, Object newValue)
+ {
+ try {
+ return propertyEventCtor.newInstance(source, prop, oldValue, newValue);
+ } catch (InstantiationException | IllegalAccessException x) {
+ throw new AssertionError(x);
+ } catch (InvocationTargetException x) {
+ Throwable cause = x.getCause();
+ if (cause instanceof Error)
+ throw (Error)cause;
+ if (cause instanceof RuntimeException)
+ throw (RuntimeException)cause;
+ throw new AssertionError(x);
+ }
+ }
+
+ /**
+ * Invokes the given PropertyChangeListener's propertyChange method
+ * with the given event.
+ */
+ static void invokePropertyChange(Object listener, Object ev) {
+ try {
+ propertyChangeMethod.invoke(listener, ev);
+ } catch (IllegalAccessException x) {
+ throw new AssertionError(x);
+ } catch (InvocationTargetException x) {
+ Throwable cause = x.getCause();
+ if (cause instanceof Error)
+ throw (Error)cause;
+ if (cause instanceof RuntimeException)
+ throw (RuntimeException)cause;
+ throw new AssertionError(x);
+ }
+ }
+ }
}
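
The Beans helper above is an instance of the usual pattern for an optional dependency: look the types up reflectively once and treat a missing class as "feature absent". A stripped-down sketch of that pattern (the class and method names in the sketch are illustrative, not part of LogManager):

import java.lang.reflect.Method;

public class OptionalDependencySketch {
    // Null when the optional API is not on the class path.
    private static final Class<?> OPTIONAL_CLASS = lookup("java.beans.PropertyChangeListener");

    private static Class<?> lookup(String name) {
        try {
            return Class.forName(name, true, OptionalDependencySketch.class.getClassLoader());
        } catch (ClassNotFoundException e) {
            return null; // the dependency is simply not present
        }
    }

    static boolean isPresent() {
        return OPTIONAL_CLASS != null;
    }

    public static void main(String[] args) {
        if (isPresent()) {
            // Only touch the optional API reflectively, never via a static reference.
            Method[] methods = OPTIONAL_CLASS.getMethods();
            System.out.println("java.beans present, " + methods.length + " methods on listener type");
        } else {
            System.out.println("java.beans absent; property-change support disabled");
        }
    }
}
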
--- a/jdk/src/share/classes/java/util/spi/CalendarNameProvider.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/spi/CalendarNameProvider.java Mon Dec 17 08:30:06 2012 -0500
@@ -174,7 +174,8 @@
* <p>{@code style} gives the style of the string representation. It is one
* of {@link Calendar#SHORT_FORMAT} ({@link Calendar#SHORT SHORT}),
* {@link Calendar#SHORT_STANDALONE}, {@link Calendar#LONG_FORMAT}
- * ({@link Calendar#LONG LONG}), or {@link Calendar#LONG_STANDALONE}.
+ * ({@link Calendar#LONG LONG}), {@link Calendar#LONG_STANDALONE},
+ * {@link Calendar#NARROW_FORMAT}, or {@link Calendar#NARROW_STANDALONE}.
*
* <p>For example, the following call will return {@code "Sunday"}.
* <pre>
@@ -195,8 +196,10 @@
* the string representation style: one of {@link
* Calendar#SHORT_FORMAT} ({@link Calendar#SHORT SHORT}),
* {@link Calendar#SHORT_STANDALONE}, {@link
- * Calendar#LONG_FORMAT} ({@link Calendar#LONG LONG}), or
- * {@link Calendar#LONG_STANDALONE}
+ * Calendar#LONG_FORMAT} ({@link Calendar#LONG LONG}),
+ * {@link Calendar#LONG_STANDALONE},
+ * {@link Calendar#NARROW_FORMAT},
+ * or {@link Calendar#NARROW_STANDALONE}
* @param locale
* the desired locale
* @return the string representation of the {@code field value}, or {@code
@@ -226,8 +229,11 @@
* <p>{@code style} gives the style of the string representation. It must be
* one of {@link Calendar#ALL_STYLES}, {@link Calendar#SHORT_FORMAT} ({@link
* Calendar#SHORT SHORT}), {@link Calendar#SHORT_STANDALONE}, {@link
- * Calendar#LONG_FORMAT} ({@link Calendar#LONG LONG}), or {@link
- * Calendar#LONG_STANDALONE}.
+ * Calendar#LONG_FORMAT} ({@link Calendar#LONG LONG}), {@link
+ * Calendar#LONG_STANDALONE}, {@link Calendar#NARROW_FORMAT}, or
+ * {@link Calendar#NARROW_STANDALONE}. Note that narrow names may
+ * not be unique due to use of single characters, such as "S" for Sunday
+ * and Saturday, and that no narrow names are included in that case.
*
* <p>For example, the following call will return a {@code Map} containing
* {@code "January"} to {@link Calendar#JANUARY}, {@code "Jan"} to {@link
@@ -247,8 +253,9 @@
* {@link Calendar#ALL_STYLES}, {@link Calendar#SHORT_FORMAT}
* ({@link Calendar#SHORT SHORT}), {@link
* Calendar#SHORT_STANDALONE}, {@link Calendar#LONG_FORMAT}
- * ({@link Calendar#LONG LONG}), or {@link
- * Calendar#LONG_STANDALONE}.
+ * ({@link Calendar#LONG LONG}), {@link Calendar#LONG_STANDALONE},
+ * {@link Calendar#NARROW_FORMAT},
+ * or {@link Calendar#NARROW_STANDALONE}
* @param locale
* the desired locale
* @return a {@code Map} containing all display names of {@code field} in
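
A provider covering the new narrow styles would simply honor them in its overrides; a skeletal sketch under the assumption of a made-up locale and a single hard-coded name (a real provider would be registered as a java.util.spi.CalendarNameProvider service and supply complete data):

import java.util.Calendar;
import java.util.Locale;
import java.util.Map;
import java.util.spi.CalendarNameProvider;

public class ExampleCalendarNameProvider extends CalendarNameProvider {
    private static final Locale EXAMPLE_LOCALE = new Locale("tlh"); // made-up locale for the sketch

    @Override
    public Locale[] getAvailableLocales() {
        return new Locale[] { EXAMPLE_LOCALE };
    }

    @Override
    public String getDisplayName(String calendarType, int field, int value,
                                 int style, Locale locale) {
        if ("gregory".equals(calendarType) && field == Calendar.DAY_OF_WEEK
                && value == Calendar.SUNDAY && style == Calendar.NARROW_FORMAT) {
            return "S"; // narrow names are typically single characters
        }
        return null; // null means no name is available for this combination
    }

    @Override
    public Map<String, Integer> getDisplayNames(String calendarType, int field,
                                                int style, Locale locale) {
        return null; // a full provider would build the name-to-value map here
    }
}
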
--- a/jdk/src/share/classes/java/util/spi/TimeZoneNameProvider.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/java/util/spi/TimeZoneNameProvider.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,4 +77,34 @@
* @see java.util.TimeZone#getDisplayName(boolean, int, java.util.Locale)
*/
public abstract String getDisplayName(String ID, boolean daylight, int style, Locale locale);
+
+ /**
+ * Returns a generic name for the given time zone {@code ID} that's suitable
+ * for presentation to the user in the specified {@code locale}. Generic
+ * time zone names are neutral with respect to standard time and daylight saving
+ * time. For example, "PT" is the short generic name of time zone ID {@code
+ * America/Los_Angeles}, while its short standard time and daylight saving
+ * time names are "PST" and "PDT", respectively. Refer to
+ * {@link #getDisplayName(String, boolean, int, Locale) getDisplayName}
+ * for valid time zone IDs.
+ *
+ * <p>The default implementation of this method returns {@code null}.
+ *
+ * @param ID a time zone ID string
+ * @param style either {@link java.util.TimeZone#LONG TimeZone.LONG} or
+ * {@link java.util.TimeZone#SHORT TimeZone.SHORT}
+ * @param locale the desired locale
+ * @return the human-readable generic name of the given time zone in the
+ * given locale, or {@code null} if it's not available.
+ * @exception IllegalArgumentException if {@code style} is invalid,
+ * or {@code locale} isn't one of the locales returned from
+ * {@link LocaleServiceProvider#getAvailableLocales()
+ * getAvailableLocales()}.
+ * @exception NullPointerException if {@code ID} or {@code locale}
+ * is {@code null}
+ * @since 1.8
+ */
+ public String getGenericDisplayName(String ID, int style, Locale locale) {
+ return null;
+ }
}
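
Providers that have generic names can override the new method; a minimal sketch under the assumption of a single hard-coded zone and locale (a real provider is registered as a java.util.spi.TimeZoneNameProvider service and supplies full data):

import java.util.Locale;
import java.util.TimeZone;
import java.util.spi.TimeZoneNameProvider;

public class ExampleTimeZoneNameProvider extends TimeZoneNameProvider {
    @Override
    public Locale[] getAvailableLocales() {
        return new Locale[] { Locale.US };
    }

    @Override
    public String getDisplayName(String ID, boolean daylight, int style, Locale locale) {
        if ("America/Los_Angeles".equals(ID) && style == TimeZone.SHORT) {
            return daylight ? "PDT" : "PST";
        }
        return null;
    }

    @Override
    public String getGenericDisplayName(String ID, int style, Locale locale) {
        // Generic names are neutral with respect to standard/daylight time.
        if ("America/Los_Angeles".equals(ID) && style == TimeZone.SHORT) {
            return "PT";
        }
        return null; // the default implementation also returns null
    }
}
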
--- a/jdk/src/share/classes/javax/imageio/metadata/doc-files/gif_metadata.html Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/imageio/metadata/doc-files/gif_metadata.html Mon Dec 17 08:30:06 2012 -0500
@@ -90,7 +90,7 @@
<!-- Max value: 8 (inclusive) -->
<!ATTLIST "LogicalScreenDescriptor" "pixelAspectRatio" #CDATA
#REQUIRED>
- <!-- If 0, indicates square pixels, else W/H = (value + 16)/64 -->
+ <!-- If 0, indicates square pixels, else W/H = (value + 15)/64 -->
<!-- Data type: Integer -->
<!-- Min value: 0 (inclusive) -->
<!-- Max value: 255 (inclusive) -->
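
The corrected comment matches the GIF89a definition, aspect ratio = (pixelAspectRatio + 15) / 64; a tiny sketch of the arithmetic:

public class GifAspectRatioSketch {
    // Returns width/height as defined by GIF89a for the pixelAspectRatio byte.
    static double aspectRatio(int pixelAspectRatio) {
        if (pixelAspectRatio == 0) {
            return 1.0; // 0 means square pixels
        }
        return (pixelAspectRatio + 15) / 64.0;
    }

    public static void main(String[] args) {
        System.out.println(aspectRatio(0));  // 1.0 (square pixels)
        System.out.println(aspectRatio(49)); // 1.0 exactly: (49 + 15) / 64
    }
}
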
--- a/jdk/src/share/classes/javax/net/ssl/KeyManagerFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/net/ssl/KeyManagerFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,17 +53,13 @@
* Obtains the default KeyManagerFactory algorithm name.
*
* <p>The default algorithm can be changed at runtime by setting
- * the value of the "ssl.KeyManagerFactory.algorithm" security
- * property (set in the Java security properties file or by calling
- * {@link java.security.Security#setProperty(java.lang.String,
- * java.lang.String)})
- * to the desired algorithm name.
+ * the value of the {@code ssl.KeyManagerFactory.algorithm}
+ * security property to the desired algorithm name.
*
- * @see java.security.Security#setProperty(java.lang.String,
- * java.lang.String)
- * @return the default algorithm name as specified in the
- * Java security properties, or an implementation-specific
- * default if no such property exists.
+ * @see java.security.Security security properties
+ * @return the default algorithm name as specified by the
+ * {@code ssl.KeyManagerFactory.algorithm} security property, or an
+ * implementation-specific default if no such property exists.
*/
public final static String getDefaultAlgorithm() {
String type;
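
The security property described above is what getDefaultAlgorithm() consults; a short sketch of reading it and creating a factory (the default value is typically SunX509 or PKIX, depending on the JDK):

import java.security.Security;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;

public class DefaultAlgorithmSketch {
    public static void main(String[] args) throws Exception {
        // The factories resolve their default algorithm from these security properties.
        System.out.println(Security.getProperty("ssl.KeyManagerFactory.algorithm"));
        System.out.println(Security.getProperty("ssl.TrustManagerFactory.algorithm"));

        KeyManagerFactory kmf =
                KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        TrustManagerFactory tmf =
                TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        System.out.println(kmf.getAlgorithm() + " / " + tmf.getAlgorithm());
    }
}
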
--- a/jdk/src/share/classes/javax/net/ssl/TrustManagerFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/net/ssl/TrustManagerFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,14 +53,13 @@
* Obtains the default TrustManagerFactory algorithm name.
*
* <p>The default TrustManager can be changed at runtime by setting
- * the value of the "ssl.TrustManagerFactory.algorithm" security
- * property (set in the Java security properties file or by calling
- * {@link java.security.Security#setProperty(String, String) })
- * to the desired algorithm name.
+ * the value of the {@code ssl.TrustManagerFactory.algorithm}
+ * security property to the desired algorithm name.
*
- * @return the default algorithm name as specified in the
- * Java security properties, or an implementation-specific default
- * if no such property exists.
+ * @see java.security.Security security properties
+ * @return the default algorithm name as specified by the
+ * {@code ssl.TrustManagerFactory.algorithm} security property, or an
+ * implementation-specific default if no such property exists.
*/
public final static String getDefaultAlgorithm() {
String type;
--- a/jdk/src/share/classes/javax/security/auth/Policy.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/security/auth/Policy.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -126,14 +126,9 @@
* are rejected. Such permission must be listed in the
* <code>java.security.Policy</code>.
*
- * <p> The default <code>Policy</code> implementation can be changed by
- * setting the value of the "auth.policy.provider" security property
- * (in the Java security properties file) to the fully qualified name of
- * the desired <code>Policy</code> implementation class.
- * The Java security properties file is located in the file named
- * <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * <p> The default {@code Policy} implementation can be changed by
+ * setting the value of the {@code auth.policy.provider} security property to
+ * the fully qualified name of the desired {@code Policy} implementation class.
*
* @deprecated as of JDK version 1.4 -- Replaced by java.security.Policy.
* java.security.Policy has a method:
@@ -154,7 +149,7 @@
* These two APIs provide callers the means to query the
* Policy for Principal-based Permission entries.
*
- *
+ * @see java.security.Security security properties
*/
@Deprecated
public abstract class Policy {
--- a/jdk/src/share/classes/javax/security/auth/callback/CallbackHandler.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/security/auth/callback/CallbackHandler.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,13 +49,9 @@
* can then choose to prompt for a username and password serially,
* or to prompt for both in a single window.
*
- * <p> A default <code>CallbackHandler</code> class implementation
- * may be specified in the <i>auth.login.defaultCallbackHandler</i>
- * security property. The security property can be set
- * in the Java security properties file located in the file named
- * <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * <p> A default {@code CallbackHandler} class implementation
+ * may be specified by setting the value of the
+ * {@code auth.login.defaultCallbackHandler} security property.
*
* <p> If the security property is set to the fully qualified name of a
* <code>CallbackHandler</code> implementation class,
@@ -67,6 +63,7 @@
* <p> All default handler implementations must provide a public
* zero-argument constructor.
*
+ * @see java.security.Security security properties
*/
public interface CallbackHandler {
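
The auth.login.defaultCallbackHandler property can be set programmatically as well as in the security properties file; a sketch, where the handler class name is hypothetical:

import java.security.Security;

public class DefaultCallbackHandlerSketch {
    public static void main(String[] args) {
        // Hypothetical handler class; it must be public with a zero-argument constructor.
        Security.setProperty("auth.login.defaultCallbackHandler",
                             "com.example.ConsoleCallbackHandler");
        System.out.println(Security.getProperty("auth.login.defaultCallbackHandler"));
    }
}
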
--- a/jdk/src/share/classes/javax/security/auth/login/Configuration.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/security/auth/login/Configuration.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -173,12 +173,8 @@
* Configuration implementation (a default subclass implementation of this
* abstract class).
* The default Configuration implementation can be changed by setting the value
- * of the "login.configuration.provider" security property (in the Java
- * security properties file) to the fully qualified name of the desired
- * Configuration subclass implementation. The Java security properties file
- * is located in the file named <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
+ * of the {@code login.configuration.provider} security property to the fully
+ * qualified name of the desired Configuration subclass implementation.
*
* <p> Application code can directly subclass Configuration to provide a custom
* implementation. In addition, an instance of a Configuration object can be
@@ -190,6 +186,7 @@
* for a list of standard Configuration types.
*
* @see javax.security.auth.login.LoginContext
+ * @see java.security.Security security properties
*/
public abstract class Configuration {
--- a/jdk/src/share/classes/javax/security/auth/login/LoginContext.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/security/auth/login/LoginContext.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -163,9 +163,9 @@
* input parameter, or if the caller specifies a <code>null</code>
* CallbackHandler object (and a <code>null</code> value is permitted),
* the LoginContext queries the
- * <i>auth.login.defaultCallbackHandler</i> security property
- * for the fully qualified class name of a default handler implementation.
- * If the security property is not set,
+ * {@code auth.login.defaultCallbackHandler} security property for the
+ * fully qualified class name of a default handler
+ * implementation. If the security property is not set,
* then the underlying modules will not have a
* CallbackHandler for use in communicating
* with users. The caller thus assumes that the configured
@@ -184,21 +184,13 @@
* </ul>
* </ol>
*
- * <p> Note that Security Properties
- * (such as <code>auth.login.defaultCallbackHandler</code>)
- * can be set programmatically via the
- * <code>java.security.Security</code> class,
- * or statically in the Java security properties file located in the
- * file named <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
- *
* @see java.security.Security
* @see javax.security.auth.AuthPermission
* @see javax.security.auth.Subject
* @see javax.security.auth.callback.CallbackHandler
* @see javax.security.auth.login.Configuration
* @see javax.security.auth.spi.LoginModule
+ * @see java.security.Security security properties
*/
public class LoginContext {
--- a/jdk/src/share/classes/javax/security/cert/X509Certificate.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/security/cert/X509Certificate.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -96,21 +96,17 @@
* </pre>
* <p>
* In either case, the code that instantiates an X.509 certificate
- * consults the Java security properties file to locate the actual
- * implementation or instantiates a default implementation.
+ * consults the value of the {@code cert.provider.x509v1} security property
+ * to locate the actual implementation or instantiates a default implementation.
* <p>
- * The Java security properties file is located in the file named
- * <JAVA_HOME>/lib/security/java.security.
- * <JAVA_HOME> refers to the value of the java.home system property,
- * and specifies the directory where the JRE is installed.
- * In the Security properties file, a default implementation
- * for X.509 v1 may be given such as:
+ * The {@code cert.provider.x509v1} property is set to a default
+ * implementation for X.509 such as:
* <pre>
* cert.provider.x509v1=com.sun.security.cert.internal.x509.X509V1CertImpl
* </pre>
* <p>
- * The value of this <code>cert.provider.x509v1</code> property has to be
- * changed to instatiate another implementation. If this security
+ * The value of this {@code cert.provider.x509v1} property has to be
+ * changed to instantiate another implementation. If this security
* property is not set, a default implementation will be used.
* Currently, due to possible security restrictions on access to
* Security properties, this value is looked up and cached at class
@@ -127,6 +123,7 @@
* @since 1.4
* @see Certificate
* @see java.security.cert.X509Extension
+ * @see java.security.Security security properties
*/
public abstract class X509Certificate extends Certificate {
@@ -156,8 +153,7 @@
* the data read from the input stream <code>inStream</code>.
* The implementation (X509Certificate is an abstract class) is
* provided by the class specified as the value of the
- * <code>cert.provider.x509v1</code>
- * property in the security properties file.
+ * {@code cert.provider.x509v1} security property.
*
* <p>Note: Only one DER-encoded
* certificate is expected to be in the input stream.
@@ -184,8 +180,7 @@
* the specified byte array.
* The implementation (X509Certificate is an abstract class) is
* provided by the class specified as the value of the
- * <code>cert.provider.x509v1</code>
- * property in the security properties file.
+ * {@code cert.provider.x509v1} security property.
*
* <p>Note: All X509Certificate
* subclasses must provide a constructor of the form:
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialArray.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialArray.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,6 +31,7 @@
import java.net.URL;
import java.util.Arrays;
+
/**
* A serialized version of an <code>Array</code>
* object, which is the mapping in the Java programming language of an SQL
@@ -41,44 +42,52 @@
* methods for getting the base type and the SQL name for the base type, and
* methods for copying all or part of a <code>SerialArray</code> object.
* <P>
+ *
* Note: In order for this class to function correctly, a connection to the
* data source
* must be available in order for the SQL <code>Array</code> object to be
* materialized (have all of its elements brought to the client server)
* if necessary. At this time, logical pointers to the data in the data source,
* such as locators, are not currently supported.
+ *
+ * <h4> Thread safety </h4>
+ *
+ * A SerialArray is not safe for use by multiple concurrent threads. If a
+ * SerialArray is to be used by more than one thread then access to the
+ * SerialArray should be controlled by appropriate synchronization.
+ *
*/
public class SerialArray implements Array, Serializable, Cloneable {
- /**
- * A serialized array in which each element is an <code>Object</code>
- * in the Java programming language that represents an element
- * in the SQL <code>ARRAY</code> value.
- * @serial
- */
+ /**
+ * A serialized array in which each element is an <code>Object</code>
+ * in the Java programming language that represents an element
+ * in the SQL <code>ARRAY</code> value.
+ * @serial
+ */
private Object[] elements;
- /**
- * The SQL type of the elements in this <code>SerialArray</code> object. The
- * type is expressed as one of the constants from the class
- * <code>java.sql.Types</code>.
- * @serial
- */
+ /**
+ * The SQL type of the elements in this <code>SerialArray</code> object. The
+ * type is expressed as one of the constants from the class
+ * <code>java.sql.Types</code>.
+ * @serial
+ */
private int baseType;
- /**
- * The type name used by the DBMS for the elements in the SQL <code>ARRAY</code>
- * value that this <code>SerialArray</code> object represents.
- * @serial
- */
+ /**
+ * The type name used by the DBMS for the elements in the SQL <code>ARRAY</code>
+ * value that this <code>SerialArray</code> object represents.
+ * @serial
+ */
private String baseTypeName;
- /**
- * The number of elements in this <code>SerialArray</code> object, which
- * is also the number of elements in the SQL <code>ARRAY</code> value
- * that this <code>SerialArray</code> object represents.
- * @serial
- */
+ /**
+ * The number of elements in this <code>SerialArray</code> object, which
+ * is also the number of elements in the SQL <code>ARRAY</code> value
+ * that this <code>SerialArray</code> object represents.
+ * @serial
+ */
private int len;
/**
@@ -192,24 +201,19 @@
}
/**
- * This method frees the <code>Array</code> object and releases the resources that
- * it holds. The object is invalid once the <code>free</code>
- * method is called.
- *<p>
- * After <code>free</code> has been called, any attempt to invoke a
- * method other than <code>free</code> will result in a <code>SQLException</code>
- * being thrown. If <code>free</code> is called multiple times, the subsequent
- * calls to <code>free</code> are treated as a no-op.
- *<p>
+ * This method frees the {@code SerialArray} object and releases the
+ * resources that it holds. The object is invalid once the {@code free}
+ * method is called. <p> If {@code free} is called multiple times, the
+ * subsequent calls to {@code free} are treated as a no-op. </p>
*
- * @throws SQLException if an error occurs releasing
- * the Array's resources
- * @exception SQLFeatureNotSupportedException if the JDBC driver does not support
- * this method
+ * @throws SQLException if an error occurs releasing the SerialArray's resources
* @since 1.6
*/
public void free() throws SQLException {
- throw new SQLFeatureNotSupportedException("Feature not supported");
+ if (elements != null) {
+ elements = null;
+ baseTypeName = null;
+ }
}
/**
@@ -292,129 +296,140 @@
}
- /**
- * Returns a new array that is a copy of this <code>SerialArray</code>
- * object.
- *
- * @return a copy of this <code>SerialArray</code> object as an
- * <code>Object</code> in the Java programming language
- * @throws SerialException if an error occurs retrieving a copy of
- * this <code>SerialArray</code> object
- */
+ /**
+ * Returns a new array that is a copy of this <code>SerialArray</code>
+ * object.
+ *
+ * @return a copy of this <code>SerialArray</code> object as an
+ * <code>Object</code> in the Java programming language
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
+ */
public Object getArray() throws SerialException {
+ isValid();
Object dst = new Object[len];
System.arraycopy((Object)elements, 0, dst, 0, len);
return dst;
}
//[if an error occurs, type map used??]
- /**
- * Returns a new array that is a copy of this <code>SerialArray</code>
- * object, using the given type map for the custom
- * mapping of each element when the elements are SQL UDTs.
- * <P>
- * This method does custom mapping if the array elements are a UDT
- * and the given type map has an entry for that UDT.
+ /**
+ * Returns a new array that is a copy of this <code>SerialArray</code>
+ * object, using the given type map for the custom
+ * mapping of each element when the elements are SQL UDTs.
+ * <P>
+ * This method does custom mapping if the array elements are a UDT
+ * and the given type map has an entry for that UDT.
* Custom mapping is recursive,
- * meaning that if, for instance, an element of an SQL structured type
- * is an SQL structured type that itself has an element that is an SQL
- * structured type, each structured type that has a custom mapping will be
- * mapped according to the given type map.
- *
+ * meaning that if, for instance, an element of an SQL structured type
+ * is an SQL structured type that itself has an element that is an SQL
+ * structured type, each structured type that has a custom mapping will be
+ * mapped according to the given type map.
+ *
* @param map a <code>java.util.Map</code> object in which
* each entry consists of 1) a <code>String</code> object
* giving the fully qualified name of a UDT and 2) the
* <code>Class</code> object for the <code>SQLData</code> implementation
* that defines how the UDT is to be mapped
- * @return a copy of this <code>SerialArray</code> object as an
- * <code>Object</code> in the Java programming language
- * @throws SerialException if an error occurs
- */
+ * @return a copy of this <code>SerialArray</code> object as an
+ * <code>Object</code> in the Java programming language
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
+ */
public Object getArray(Map<String, Class<?>> map) throws SerialException {
+ isValid();
Object dst[] = new Object[len];
System.arraycopy((Object)elements, 0, dst, 0, len);
return dst;
}
- /**
- * Returns a new array that is a copy of a slice
- * of this <code>SerialArray</code> object, starting with the
- * element at the given index and containing the given number
- * of consecutive elements.
- *
- * @param index the index into this <code>SerialArray</code> object
- * of the first element to be copied;
- * the index of the first element is <code>0</code>
- * @param count the number of consecutive elements to be copied, starting
- * at the given index
- * @return a copy of the designated elements in this <code>SerialArray</code>
- * object as an <code>Object</code> in the Java programming language
- * @throws SerialException if an error occurs
- */
+ /**
+ * Returns a new array that is a copy of a slice
+ * of this <code>SerialArray</code> object, starting with the
+ * element at the given index and containing the given number
+ * of consecutive elements.
+ *
+ * @param index the index into this <code>SerialArray</code> object
+ * of the first element to be copied;
+ * the index of the first element is <code>0</code>
+ * @param count the number of consecutive elements to be copied, starting
+ * at the given index
+ * @return a copy of the designated elements in this <code>SerialArray</code>
+ * object as an <code>Object</code> in the Java programming language
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
+ */
public Object getArray(long index, int count) throws SerialException {
+ isValid();
Object dst = new Object[count];
System.arraycopy((Object)elements, (int)index, dst, 0, count);
return dst;
}
- /**
- * Returns a new array that is a copy of a slice
- * of this <code>SerialArray</code> object, starting with the
- * element at the given index and containing the given number
- * of consecutive elements.
- * <P>
- * This method does custom mapping if the array elements are a UDT
- * and the given type map has an entry for that UDT.
+ /**
+ * Returns a new array that is a copy of a slice
+ * of this <code>SerialArray</code> object, starting with the
+ * element at the given index and containing the given number
+ * of consecutive elements.
+ * <P>
+ * This method does custom mapping if the array elements are a UDT
+ * and the given type map has an entry for that UDT.
* Custom mapping is recursive,
- * meaning that if, for instance, an element of an SQL structured type
- * is an SQL structured type that itself has an element that is an SQL
- * structured type, each structured type that has a custom mapping will be
- * mapped according to the given type map.
- *
- * @param index the index into this <code>SerialArray</code> object
- * of the first element to be copied; the index of the
- * first element in the array is <code>0</code>
- * @param count the number of consecutive elements to be copied, starting
- * at the given index
+ * meaning that if, for instance, an element of an SQL structured type
+ * is an SQL structured type that itself has an element that is an SQL
+ * structured type, each structured type that has a custom mapping will be
+ * mapped according to the given type map.
+ *
+ * @param index the index into this <code>SerialArray</code> object
+ * of the first element to be copied; the index of the
+ * first element in the array is <code>0</code>
+ * @param count the number of consecutive elements to be copied, starting
+ * at the given index
* @param map a <code>java.util.Map</code> object in which
* each entry consists of 1) a <code>String</code> object
* giving the fully qualified name of a UDT and 2) the
* <code>Class</code> object for the <code>SQLData</code> implementation
* that defines how the UDT is to be mapped
- * @return a copy of the designated elements in this <code>SerialArray</code>
- * object as an <code>Object</code> in the Java programming language
- * @throws SerialException if an error occurs
- */
+ * @return a copy of the designated elements in this <code>SerialArray</code>
+ * object as an <code>Object</code> in the Java programming language
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
+ */
public Object getArray(long index, int count, Map<String,Class<?>> map)
throws SerialException
{
+ isValid();
Object dst = new Object[count];
System.arraycopy((Object)elements, (int)index, dst, 0, count);
return dst;
}
- /**
- * Retrieves the SQL type of the elements in this <code>SerialArray</code>
- * object. The <code>int</code> returned is one of the constants in the class
- * <code>java.sql.Types</code>.
- *
- * @return one of the constants in <code>java.sql.Types</code>, indicating
- * the SQL type of the elements in this <code>SerialArray</code> object
- * @throws SerialException if an error occurs
- */
+ /**
+ * Retrieves the SQL type of the elements in this <code>SerialArray</code>
+ * object. The <code>int</code> returned is one of the constants in the class
+ * <code>java.sql.Types</code>.
+ *
+ * @return one of the constants in <code>java.sql.Types</code>, indicating
+ * the SQL type of the elements in this <code>SerialArray</code> object
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
+ */
public int getBaseType() throws SerialException {
+ isValid();
return baseType;
}
- /**
- * Retrieves the DBMS-specific type name for the elements in this
- * <code>SerialArray</code> object.
- *
- * @return the SQL type name used by the DBMS for the base type of this
+ /**
+ * Retrieves the DBMS-specific type name for the elements in this
+ * <code>SerialArray</code> object.
+ *
+ * @return the SQL type name used by the DBMS for the base type of this
* <code>SerialArray</code> object
- * @throws SerialException if an error occurs
- */
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
+ */
public String getBaseTypeName() throws SerialException {
+ isValid();
return baseTypeName;
}
@@ -434,11 +449,13 @@
* @return a <code>ResultSet</code> object containing the designated
* elements in this <code>SerialArray</code> object, with a
* separate row for each element
- * @throws SerialException, which in turn throws an
- * <code>UnsupportedOperationException</code>, if this method is called
+ * @throws SerialException if called with the cause set to
+ * {@code UnsupportedOperationException}
*/
public ResultSet getResultSet(long index, int count) throws SerialException {
- throw new UnsupportedOperationException();
+ SerialException se = new SerialException();
+ se.initCause(new UnsupportedOperationException());
+ throw se;
}
/**
@@ -461,13 +478,15 @@
* @return a <code>ResultSet</code> object containing all of the
* elements in this <code>SerialArray</code> object, with a
* separate row for each element
- * @throws SerialException, which in turn throws an
- * <code>UnsupportedOperationException</code>, if this method is called
+ * @throws SerialException if called with the cause set to
+ * {@code UnsupportedOperationException}
*/
public ResultSet getResultSet(Map<String, Class<?>> map)
throws SerialException
{
- throw new UnsupportedOperationException();
+ SerialException se = new SerialException();
+ se.initCause(new UnsupportedOperationException());
+ throw se;
}
/**
@@ -480,11 +499,13 @@
* @return a <code>ResultSet</code> object containing all of the
* elements in this <code>SerialArray</code> object, with a
* separate row for each element
- * @throws SerialException if called, which in turn throws an
- * <code>UnsupportedOperationException</code>, if this method is called
+ * @throws SerialException if called with the cause set to
+ * {@code UnsupportedOperationException}
*/
public ResultSet getResultSet() throws SerialException {
- throw new UnsupportedOperationException();
+ SerialException se = new SerialException();
+ se.initCause(new UnsupportedOperationException());
+ throw se;
}
@@ -514,16 +535,19 @@
* @return a <code>ResultSet</code> object containing the designated
* elements in this <code>SerialArray</code> object, with a
* separate row for each element
- * @throws SerialException if called, which in turn throws an
- * <code>UnsupportedOperationException</code>
+ * @throws SerialException if called with the cause set to
+ * {@code UnsupportedOperationException}
*/
public ResultSet getResultSet(long index, int count,
Map<String,Class<?>> map)
throws SerialException
{
- throw new UnsupportedOperationException();
+ SerialException se = new SerialException();
+ se.initCause(new UnsupportedOperationException());
+ throw se;
}
+
/**
* Compares this SerialArray to the specified object. The result is {@code
* true} if and only if the argument is not {@code null} and is a {@code
@@ -566,12 +590,12 @@
* reference to a clone of the underlying objects array, not a reference
* to the original underlying object array of this {@code SerialArray} object.
*
- * @return a clone of this SerialArray
+ * @return a clone of this SerialArray
*/
public Object clone() {
try {
SerialArray sa = (SerialArray) super.clone();
- sa.elements = Arrays.copyOf(elements, len);
+ sa.elements = (elements != null) ? Arrays.copyOf(elements, len) : null;
return sa;
} catch (CloneNotSupportedException ex) {
// this shouldn't happen, since we are Cloneable
@@ -616,6 +640,19 @@
}
/**
+ * Checks whether {@code free} has previously been called on this object.
+ *
+ * @throws SerialException if {@code free} has previously been called on
+ * this object
+ */
+ private void isValid() throws SerialException {
+ if (elements == null) {
+ throw new SerialException("Error: You cannot call a method on a "
+ + "SerialArray instance once free() has been called.");
+ }
+ }
+
+ /**
* The identifier that assists in the serialization of this <code>SerialArray</code>
* object.
*/
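
The new isValid() guard gives free() the documented once-freed semantics. A sketch of the contract from the caller's side (the java.sql.Array is assumed to come from an existing ResultSet or Connection, which is not shown):

import java.sql.Array;
import javax.sql.rowset.serial.SerialArray;
import javax.sql.rowset.serial.SerialException;

public class SerialArrayFreeSketch {
    // 'sqlArray' is assumed to be obtained elsewhere, e.g. ResultSet.getArray(...).
    static void demo(Array sqlArray) throws Exception {
        SerialArray serial = new SerialArray(sqlArray);
        Object copy = serial.getArray();   // fine: the array is still valid
        serial.free();                     // releases the internal element array
        serial.free();                     // additional calls are a no-op
        try {
            serial.getArray();             // now fails: the object has been freed
        } catch (SerialException expected) {
            System.out.println(expected.getMessage());
        }
    }
}
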
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialBlob.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialBlob.java Mon Dec 17 08:30:06 2012 -0500
@@ -51,6 +51,12 @@
* <code>Blob</code> object within a <code>SerialBlob</code> object
* and to update or truncate a <code>Blob</code> object.
*
+ * <h4> Thread safety </h4>
+ *
+ * <p> A SerialBlob is not safe for use by multiple concurrent threads. If a
+ * SerialBlob is to be used by more than one thread then access to the SerialBlob
+ * should be controlled by appropriate synchronization.
+ *
* @author Jonathan Bruce
*/
public class SerialBlob implements Blob, Serializable, Cloneable {
@@ -76,7 +82,7 @@
private long len;
/**
- * The orginal number of bytes in this <code>SerialBlob</code> object's
+ * The original number of bytes in this <code>SerialBlob</code> object's
* array of bytes when it was first established.
* @serial
*/
@@ -160,9 +166,11 @@
* @return an array of bytes that is a copy of a region of this
* <code>SerialBlob</code> object, starting at the given
* position and containing the given number of consecutive bytes
- * @throws SerialException if the given starting position is out of bounds
+ * @throws SerialException if the given starting position is out of bounds;
+ * if {@code free} had previously been called on this object
*/
public byte[] getBytes(long pos, int length) throws SerialException {
+ isValid();
if (length > len) {
length = (int)len;
}
@@ -189,9 +197,11 @@
*
* @return a <code>long</code> indicating the length in bytes of this
* <code>SerialBlob</code> object's array of bytes
- * @throws SerialException if an error occurs
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
*/
public long length() throws SerialException {
+ isValid();
return len;
}
@@ -203,12 +213,14 @@
*
* @return a <code>java.io.InputStream</code> object that contains
* this <code>SerialBlob</code> object's array of bytes
- * @throws SerialException if an error occurs
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
* @see #setBinaryStream
*/
public java.io.InputStream getBinaryStream() throws SerialException {
- InputStream stream = new ByteArrayInputStream(buf);
- return stream;
+ isValid();
+ InputStream stream = new ByteArrayInputStream(buf);
+ return stream;
}
/**
@@ -227,12 +239,14 @@
* position; <code>-1</code> if the pattern is not found
* or the given starting position is out of bounds; position
* numbering for the return value starts at <code>1</code>
- * @throws SerialException if an error occurs when serializing the blob
+ * @throws SerialException if an error occurs when serializing the blob;
+ * if {@code free} had previously been called on this object
* @throws SQLException if there is an error accessing the <code>BLOB</code>
* value from the database
*/
public long position(byte[] pattern, long start)
throws SerialException, SQLException {
+ isValid();
if (start < 1 || start > len) {
return -1;
}
@@ -270,12 +284,14 @@
* at the specified position; <code>-1</code> if the pattern is
* not found or the given starting position is out of bounds;
* position numbering for the return value starts at <code>1</code>
- * @throws SerialException if an error occurs when serializing the blob
+ * @throws SerialException if an error occurs when serializing the blob;
+ * if {@code free} had previously been called on this object
* @throws SQLException if there is an error accessing the <code>BLOB</code>
* value from the database
*/
public long position(Blob pattern, long start)
throws SerialException, SQLException {
+ isValid();
return position(pattern.getBytes(1, (int)(pattern.length())), start);
}
@@ -293,7 +309,8 @@
* @return the number of bytes written
* @throws SerialException if there is an error accessing the
* <code>BLOB</code> value; or if an invalid position is set; if an
- * invalid offset value is set
+ * invalid offset value is set;
+ * if {@code free} had previously been called on this object
* @throws SQLException if there is an error accessing the <code>BLOB</code>
* value from the database
* @see #getBytes
@@ -328,7 +345,8 @@
* <code>BLOB</code> value; if an invalid position is set; if an
* invalid offset value is set; if number of bytes to be written
* is greater than the <code>SerialBlob</code> length; or the combined
- * values of the length and offset is greater than the Blob buffer
+ * values of the length and offset is greater than the Blob buffer;
+ * if {@code free} had previously been called on this object
* @throws SQLException if there is an error accessing the <code>BLOB</code>
* value from the database.
* @see #getBytes
@@ -336,6 +354,7 @@
public int setBytes(long pos, byte[] bytes, int offset, int length)
throws SerialException, SQLException {
+ isValid();
if (offset < 0 || offset > bytes.length) {
throw new SerialException("Invalid offset in byte array set");
}
@@ -378,11 +397,13 @@
* @throws SQLException if there is an error accessing the
* <code>BLOB</code> value
* @throws SerialException if the SerialBlob is not instantiated with a
- * <code>Blob</code> object that supports <code>setBinaryStream()</code>
+ * <code>Blob</code> object that supports <code>setBinaryStream()</code>;
+ * if {@code free} had previously been called on this object
* @see #getBinaryStream
*/
public java.io.OutputStream setBinaryStream(long pos)
throws SerialException, SQLException {
+ isValid();
if (this.blob != null) {
return this.blob.setBinaryStream(pos);
} else {
@@ -400,54 +421,75 @@
* value that this <code>Blob</code> object represents should be
* truncated
* @throws SerialException if there is an error accessing the Blob value;
- * or the length to truncate is greater that the SerialBlob length
+ * or the length to truncate is greater than the SerialBlob length;
+ * if {@code free} had previously been called on this object
*/
public void truncate(long length) throws SerialException {
- if (length > len) {
- throw new SerialException
- ("Length more than what can be truncated");
- } else if((int)length == 0) {
- buf = new byte[0];
- len = length;
- } else {
- len = length;
- buf = this.getBytes(1, (int)len);
- }
+ isValid();
+ if (length > len) {
+ throw new SerialException
+ ("Length more than what can be truncated");
+ } else if((int)length == 0) {
+ buf = new byte[0];
+ len = length;
+ } else {
+ len = length;
+ buf = this.getBytes(1, (int)len);
+ }
}
/**
- * Returns an <code>InputStream</code> object that contains a partial <code>Blob</code> value,
- * starting with the byte specified by pos, which is length bytes in length.
+ * Returns an
+ * <code>InputStream</code> object that contains a partial
+ * {@code Blob} value, starting with the byte specified by pos, which is
+ * length bytes in length.
*
- * @param pos the offset to the first byte of the partial value to be retrieved.
- * The first byte in the <code>Blob</code> is at position 1
+ * @param pos the offset to the first byte of the partial value to be
+ * retrieved. The first byte in the {@code Blob} is at position 1
* @param length the length in bytes of the partial value to be retrieved
- * @return <code>InputStream</code> through which the partial <code>Blob</code> value can be read.
- * @throws SQLException if pos is less than 1 or if pos is greater than the number of bytes
- * in the <code>Blob</code> or if pos + length is greater than the number of bytes
- * in the <code>Blob</code>
+ * @return
+ * <code>InputStream</code> through which the partial {@code Blob} value can
+ * be read.
+ * @throws SQLException if pos is less than 1 or if pos is greater than the
+ * number of bytes in the {@code Blob} or if pos + length is greater than
+ * the number of bytes in the {@code Blob}
+ * @throws SerialException if the {@code free} method had been previously
+ * called on this object
*
* @since 1.6
*/
- public InputStream getBinaryStream(long pos,long length) throws SQLException {
- throw new java.lang.UnsupportedOperationException("Not supported");
+ public InputStream getBinaryStream(long pos, long length) throws SQLException {
+ isValid();
+ if (pos < 1 || pos > this.length()) {
+ throw new SerialException("Invalid position in BLOB object set");
+ }
+ if (length < 1 || length > len - pos + 1) {
+ throw new SerialException("length is < 1 or pos + length >"
+ + "total number of bytes");
+ }
+ return new ByteArrayInputStream(buf, (int) pos - 1, (int) length);
}
/**
- * This method frees the <code>Blob</code> object and releases the resources that it holds.
- * <code>Blob</code> object. The object is invalid once the <code>free</code>
- * method is called. If <code>free</code> is called multiple times, the subsequent
- * calls to <code>free</code> are treated as a no-op.
+ * This method frees the {@code SerialBlob} object and releases the
+ * resources that it holds. The object is invalid once the {@code free}
+ * method is called. <p> If {@code free} is called multiple times, the
+ * subsequent calls to {@code free} are treated as a no-op. </p>
*
- * @throws SQLException if an error occurs releasing
- * the Blob's resources
+ * @throws SQLException if an error occurs releasing the Blob's resources
* @since 1.6
*/
public void free() throws SQLException {
- throw new java.lang.UnsupportedOperationException("Not supported");
+ if (buf != null) {
+ buf = null;
+ if (blob != null) {
+ blob.free();
+ }
+ blob = null;
+ }
}
/**
@@ -494,7 +536,7 @@
public Object clone() {
try {
SerialBlob sb = (SerialBlob) super.clone();
- sb.buf = Arrays.copyOf(buf, (int)len);
+ sb.buf = (buf != null) ? Arrays.copyOf(buf, (int)len) : null;
sb.blob = null;
return sb;
} catch (CloneNotSupportedException ex) {
@@ -541,9 +583,21 @@
}
/**
- * The identifier that assists in the serialization of this <code>SerialBlob</code>
- * object.
+ * Check to see if this object had previously had its {@code free} method
+ * called.
+ *
+ * @throws SerialException if the {@code free} method had been previously
+ * called on this object
*/
+ private void isValid() throws SerialException {
+ if (buf == null) {
+ throw new SerialException("Error: You cannot call a method on a "
+ + "SerialBlob instance once free() has been called.");
+ }
+ }
+ /**
+ * The identifier that assists in the serialization of this
+ * {@code SerialBlob} object.
+ */
static final long serialVersionUID = -8144641928112860441L;
}
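Illustrative sketch (not part of the changeset): with the change above, getBinaryStream(pos, length) returns a bounded stream over the internal buffer instead of throwing UnsupportedOperationException, and free() releases the buffer so that any later call fails with SerialException. The byte values and expected output below are assumptions made for the example.

import java.io.InputStream;
import javax.sql.rowset.serial.SerialBlob;
import javax.sql.rowset.serial.SerialException;

public class SerialBlobFreeDemo {
    public static void main(String[] args) throws Exception {
        SerialBlob blob = new SerialBlob(new byte[] {1, 2, 3, 4, 5});

        // Positions are 1-based: stream over bytes 2..4 of the internal buffer.
        try (InputStream in = blob.getBinaryStream(2, 3)) {
            int b;
            while ((b = in.read()) != -1) {
                System.out.print(b + " ");          // expected: 2 3 4
            }
            System.out.println();
        }

        blob.free();                                // releases buf and any wrapped Blob
        blob.free();                                // a second call is a no-op

        try {
            blob.length();                          // any further use is rejected
        } catch (SerialException expected) {
            System.out.println(expected.getMessage());
        }
    }
}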
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialClob.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialClob.java Mon Dec 17 08:30:06 2012 -0500
@@ -44,6 +44,11 @@
* from a <code>SerialClob</code> object or to locate the start of
* a pattern of characters.
*
+ * <h4> Thread safety </h4>
+ *
+ * <p> A SerialClob is not safe for use by multiple concurrent threads. If a
+ * SerialClob is to be used by more than one thread then access to the SerialClob
+ * should be controlled by appropriate synchronization.
* @author Jonathan Bruce
*/
public class SerialClob implements Clob, Serializable, Cloneable {
@@ -180,9 +185,11 @@
*
* @return a <code>long</code> indicating the length in characters of this
* <code>SerialClob</code> object's array of character
- * @throws SerialException if an error occurs
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
*/
public long length() throws SerialException {
+ isValid();
return len;
}
@@ -194,9 +201,11 @@
*
* @return a <code>java.io.Reader</code> object containing this
* <code>SerialClob</code> object's data
- * @throws SerialException if an error occurs
+ * @throws SerialException if an error occurs;
+ * if {@code free} had previously been called on this object
*/
public java.io.Reader getCharacterStream() throws SerialException {
+ isValid();
return (java.io.Reader) new CharArrayReader(buf);
}
@@ -210,13 +219,15 @@
*
* @return a <code>java.io.InputStream</code> object containing
* this <code>SerialClob</code> object's data
- * @throws SerialException if this <code>SerialClob</code> object was not instantiated
- * with a <code>Clob</code> object
+ * @throws SerialException if this {@code SerialClob} object was not
+ * instantiated with a <code>Clob</code> object;
+ * if {@code free} had previously been called on this object
* @throws SQLException if there is an error accessing the
- * <code>CLOB</code> value represented by the <code>Clob</code> object that was
- * used to create this <code>SerialClob</code> object
+ * <code>CLOB</code> value represented by the <code>Clob</code> object
+ * that was used to create this <code>SerialClob</code> object
*/
public java.io.InputStream getAsciiStream() throws SerialException, SQLException {
+ isValid();
if (this.clob != null) {
return this.clob.getAsciiStream();
} else {
@@ -248,12 +259,14 @@
* this <code>SerialClob</code> object beginning at the
* given position and containing the specified number of
* consecutive characters
- * @throws SerialException if either of the arguments is out of bounds
+ * @throws SerialException if either of the arguments is out of bounds;
+ * if {@code free} had previously been called on this object
*/
public String getSubString(long pos, int length) throws SerialException {
+ isValid();
if (pos < 1 || pos > this.length()) {
- throw new SerialException("Invalid position in BLOB object set");
+ throw new SerialException("Invalid position in SerialClob object set");
}
if ((pos-1) + length > this.length()) {
@@ -287,13 +300,14 @@
* <code>-1</code> if the given <code>String</code> object is
* not found or the starting position is out of bounds; position
* numbering for the return value starts at <code>1</code>
- * @throws SerialException if an error occurs locating the String signature
- * @throws SQLException if there is an error accessing the Blob value
+ * @throws SerialException if the {@code free} method had been
+ * previously called on this object
+ * @throws SQLException if there is an error accessing the Clob value
* from the database.
*/
public long position(String searchStr, long start)
throws SerialException, SQLException {
-
+ isValid();
if (start < 1 || start > len) {
return -1;
}
@@ -332,13 +346,14 @@
* @return the position at which the given <code>Clob</code>
* object begins in this <code>SerialClob</code> object,
* at or after the specified starting position
- * @throws SerialException if an error occurs locating the Clob signature
- * @throws SQLException if there is an error accessing the Blob value
+ * @throws SerialException if an error occurs locating the Clob signature;
+ * if the {@code free} method had been previously called on this object
+ * @throws SQLException if there is an error accessing the Clob value
* from the database
*/
public long position(Clob searchStr, long start)
throws SerialException, SQLException {
-
+ isValid();
return position(searchStr.getSubString(1,(int)searchStr.length()), start);
}
@@ -358,7 +373,8 @@
* <code>CLOB</code> value; if an invalid position is set; if an
* invalid offset value is set; if number of bytes to be written
* is greater than the <code>SerialClob</code> length; or the combined
- * values of the length and offset is greater than the Clob buffer
+ * values of the length and offset is greater than the Clob buffer;
+ * if the {@code free} method had been previously called on this object
*/
public int setString(long pos, String str) throws SerialException {
return (setString(pos, str, 0, str.length()));
@@ -383,10 +399,12 @@
* <code>CLOB</code> value; if an invalid position is set; if an
* invalid offset value is set; if number of bytes to be written
* is greater than the <code>SerialClob</code> length; or the combined
- * values of the length and offset is greater than the Clob buffer
+ * values of the length and offset is greater than the Clob buffer;
+ * if the {@code free} method had been previously called on this object
*/
public int setString(long pos, String str, int offset, int length)
throws SerialException {
+ isValid();
String temp = str.substring(offset);
char cPattern[] = temp.toCharArray();
@@ -395,7 +413,7 @@
}
if (pos < 1 || pos > this.length()) {
- throw new SerialException("Invalid position in BLOB object set");
+ throw new SerialException("Invalid position in Clob object set");
}
if ((long)(length) > origLen) {
@@ -430,13 +448,15 @@
* <code>CLOB</code> object
* @return the stream to which ASCII encoded characters can be written
* @throws SerialException if SerialClob is not instantiated with a
- * Clob object that supports <code>setAsciiStream</code>
+ * Clob object;
+ * if the {@code free} method had been previously called on this object
* @throws SQLException if there is an error accessing the
* <code>CLOB</code> value
* @see #getAsciiStream
*/
public java.io.OutputStream setAsciiStream(long pos)
throws SerialException, SQLException {
+ isValid();
if (this.clob != null) {
return this.clob.setAsciiStream(pos);
} else {
@@ -460,13 +480,15 @@
*
* @return a stream to which Unicode encoded characters can be written
* @throws SerialException if the SerialClob is not instantiated with
- * a Clob object that supports <code>setCharacterStream</code>
+ * a Clob object;
+ * if the {@code free} method had been previously called on this object
* @throws SQLException if there is an error accessing the
* <code>CLOB</code> value
* @see #getCharacterStream
*/
public java.io.Writer setCharacterStream(long pos)
throws SerialException, SQLException {
+ isValid();
if (this.clob != null) {
return this.clob.setCharacterStream(pos);
} else {
@@ -486,33 +508,80 @@
*
* @param length the length, in bytes, to which the <code>CLOB</code>
* value should be truncated
- * @throws SQLException if there is an error accessing the
- * <code>CLOB</code> value
+ * @throws SerialException if there is an error accessing the
+ * <code>CLOB</code> value;
+ * if the {@code free} method had been previously called on this object
*/
public void truncate(long length) throws SerialException {
- if (length > len) {
- throw new SerialException
- ("Length more than what can be truncated");
- } else {
- len = length;
- // re-size the buffer
+ isValid();
+ if (length > len) {
+ throw new SerialException
+ ("Length more than what can be truncated");
+ } else {
+ len = length;
+ // re-size the buffer
- if (len == 0) {
- buf = new char[] {};
- } else {
+ if (len == 0) {
+ buf = new char[] {};
+ } else {
buf = (this.getSubString(1, (int)len)).toCharArray();
- }
-
- }
+ }
+ }
}
+ /**
+ * Returns a {@code Reader} object that contains a partial
+ * {@code SerialClob} value, starting
+ * with the character specified by pos, which is length characters in length.
+ *
+ * @param pos the offset to the first character of the partial value to
+ * be retrieved. The first character in the {@code SerialClob} is at position 1.
+ * @param length the length in characters of the partial value to be retrieved.
+ * @return {@code Reader} through which the partial {@code SerialClob}
+ * value can be read.
+ * @throws SQLException if pos is less than 1 or if pos is greater than the
+ * number of characters in the {@code SerialClob} or if pos + length
+ * is greater than the number of characters in the {@code SerialClob};
+ * @throws SerialException if the {@code free} method had been previously
+ * called on this object
+ * @since 1.6
+ */
public Reader getCharacterStream(long pos, long length) throws SQLException {
- throw new java.lang.UnsupportedOperationException("Not supported");
+ isValid();
+ if (pos < 1 || pos > len) {
+ throw new SerialException("Invalid position in Clob object set");
+ }
+
+ if ((pos-1) + length > len) {
+ throw new SerialException("Invalid position and substring length");
+ }
+ if (length <= 0) {
+ throw new SerialException("Invalid length specified");
+ }
+ return new CharArrayReader(buf, (int)pos - 1, (int)length); // pos is 1-based
}
+ /**
+ * This method frees the {@code SerialClob} object and releases the
+ * resources that it holds.
+ * The object is invalid once the {@code free} method is called.
+ * <p>
+ * If {@code free} is called multiple times, the subsequent
+ * calls to {@code free} are treated as a no-op.
+ * </p>
+ * @throws SQLException if an error occurs releasing
+ * the Clob's resources
+ * @since 1.6
+ */
public void free() throws SQLException {
- throw new java.lang.UnsupportedOperationException("Not supported");
+ if (buf != null) {
+ buf = null;
+ if (clob != null) {
+ clob.free();
+ }
+ clob = null;
+ }
}
/**
@@ -559,7 +628,7 @@
public Object clone() {
try {
SerialClob sc = (SerialClob) super.clone();
- sc.buf = Arrays.copyOf(buf, (int)len);
+ sc.buf = (buf != null) ? Arrays.copyOf(buf, (int)len) : null;
sc.clob = null;
return sc;
} catch (CloneNotSupportedException ex) {
@@ -605,7 +674,20 @@
}
/**
- * The identifier that assists in the serialization of this <code>SerialClob</code>
+ * Check to see if this object had previously had its {@code free} method
+ * called.
+ *
+ * @throws SerialException if the {@code free} method had been previously
+ * called on this object
+ */
+ private void isValid() throws SerialException {
+ if (buf == null) {
+ throw new SerialException("Error: You cannot call a method on a "
+ + "SerialClob instance once free() has been called.");
+ }
+ }
+
+ /**
+ * The identifier that assists in the serialization of this {@code SerialClob}
* object.
*/
static final long serialVersionUID = -1662519690087375313L;
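Illustrative sketch (not part of the changeset): the same pattern for SerialClob, exercising the new getCharacterStream(pos, length) partial reader and the free() guard. It assumes the 1-based positioning documented for getSubString; the input string and expected output are made up for the example.

import java.io.Reader;
import javax.sql.rowset.serial.SerialClob;
import javax.sql.rowset.serial.SerialException;

public class SerialClobFreeDemo {
    public static void main(String[] args) throws Exception {
        SerialClob clob = new SerialClob("hello rowset".toCharArray());

        // Read 5 characters starting at 1-based position 7.
        try (Reader r = clob.getCharacterStream(7, 5)) {
            char[] part = new char[5];
            int n = r.read(part);
            System.out.println(new String(part, 0, n));   // expected: rowse
        }

        clob.free();
        try {
            clob.getSubString(1, 3);                      // rejected once free() has been called
        } catch (SerialException expected) {
            System.out.println(expected.getMessage());
        }
    }
}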
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialDatalink.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialDatalink.java Mon Dec 17 08:30:06 2012 -0500
@@ -42,6 +42,12 @@
* <pre>
* java.net.URL url = rowset.getURL(1);
* </pre>
+ *
+ * <h4> Thread safety </h4>
+ *
+ * A SerialDatalink is not safe for use by multiple concurrent threads. If a
+ * SerialDatalink is to be used by more than one thread then access to the
+ * SerialDatalink should be controlled by appropriate synchronization.
*/
public class SerialDatalink implements Serializable, Cloneable {
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialJavaObject.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialJavaObject.java Mon Dec 17 08:30:06 2012 -0500
@@ -44,6 +44,12 @@
* Static or transient fields cannot be serialized; an attempt to serialize
* them will result in a <code>SerialException</code> object being thrown.
*
+ * <h4> Thread safety </h4>
+ *
+ * A SerialJavaObject is not safe for use by multiple concurrent threads. If a
+ * SerialJavaObject is to be used by more than one thread then access to the
+ * SerialJavaObject should be controlled by appropriate synchronization.
+ *
* @author Jonathan Bruce
*/
public class SerialJavaObject implements Serializable, Cloneable {
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialRef.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialRef.java Mon Dec 17 08:30:06 2012 -0500
@@ -36,6 +36,13 @@
* The <code>SerialRef</code> class provides a constructor for
* creating a <code>SerialRef</code> instance from a <code>Ref</code>
* object and provides methods for getting and setting the <code>Ref</code> object.
+ *
+ * <h4> Thread safety </h4>
+ *
+ * A SerialRef is not safe for use by multiple concurrent threads. If a
+ * SerialRef is to be used by more than one thread then access to the SerialRef
+ * should be controlled by appropriate synchronization.
+ *
*/
public class SerialRef implements Ref, Serializable, Cloneable {
--- a/jdk/src/share/classes/javax/sql/rowset/serial/SerialStruct.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/sql/rowset/serial/SerialStruct.java Mon Dec 17 08:30:06 2012 -0500
@@ -50,6 +50,13 @@
* an instance from a <code>Struct</code> object, a method for retrieving
* the SQL type name of the SQL structured type in the database, and methods
* for retrieving its attribute values.
+ *
+ * <h4> Thread safety </h4>
+ *
+ * A SerialStruct is not safe for use by multiple concurrent threads. If a
+ * SerialStruct is to be used by more than one thread then access to the
+ * SerialStruct should be controlled by appropriate synchronization.
+ *
*/
public class SerialStruct implements Struct, Serializable, Cloneable {
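Illustrative sketch (not part of the changeset): the thread-safety notes added to these serial classes put the burden on callers. Below is a minimal example of the external synchronization they suggest; the holder class and lock object are hypothetical, not part of any API.

import javax.sql.rowset.serial.SerialBlob;

public class SharedBlobHolder {
    private final Object lock = new Object();
    private final SerialBlob blob;                 // shared, not thread safe by itself

    public SharedBlobHolder(SerialBlob blob) {
        this.blob = blob;
    }

    public long safeLength() throws Exception {
        synchronized (lock) {                      // serialize all access across threads
            return blob.length();
        }
    }

    public void safeTruncate(long len) throws Exception {
        synchronized (lock) {
            blob.truncate(len);
        }
    }
}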
--- a/jdk/src/share/classes/javax/swing/JColorChooser.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/swing/JColorChooser.java Mon Dec 17 08:30:06 2012 -0500
@@ -182,6 +182,7 @@
dialog = new ColorChooserDialog((Dialog)window, title, modal, c, chooserPane,
okListener, cancelListener);
}
+ dialog.getAccessibleContext().setAccessibleDescription(title);
return dialog;
}
@@ -647,6 +648,7 @@
buttonPane.setLayout(new FlowLayout(FlowLayout.CENTER));
JButton okButton = new JButton(okString);
getRootPane().setDefaultButton(okButton);
+ okButton.getAccessibleContext().setAccessibleDescription(okString);
okButton.setActionCommand("OK");
okButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
@@ -659,6 +661,7 @@
buttonPane.add(okButton);
cancelButton = new JButton(cancelString);
+ cancelButton.getAccessibleContext().setAccessibleDescription(cancelString);
// The following few lines are used to register esc to close the dialog
Action cancelKeyAction = new AbstractAction() {
@@ -688,6 +691,7 @@
buttonPane.add(cancelButton);
JButton resetButton = new JButton(resetString);
+ resetButton.getAccessibleContext().setAccessibleDescription(resetString);
resetButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
reset();
--- a/jdk/src/share/classes/javax/swing/colorchooser/ColorChooserPanel.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/swing/colorchooser/ColorChooserPanel.java Mon Dec 17 08:30:06 2012 -0500
@@ -135,6 +135,7 @@
String label = this.model.getText(this, "HexCode"); // NON-NLS: suffix
boolean visible = label != null;
this.text.setVisible(visible);
+ this.text.getAccessibleContext().setAccessibleDescription(label);
this.label.setVisible(visible);
if (visible) {
this.label.setText(label);
--- a/jdk/src/share/classes/javax/swing/colorchooser/ColorPanel.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/swing/colorchooser/ColorPanel.java Mon Dec 17 08:30:06 2012 -0500
@@ -37,6 +37,7 @@
import javax.swing.JPanel;
import javax.swing.JRadioButton;
import javax.swing.border.EmptyBorder;
+import javax.swing.JSpinner.DefaultEditor;
final class ColorPanel extends JPanel implements ActionListener {
@@ -119,17 +120,26 @@
int count = this.model.getCount();
this.spinners[4].setVisible(count > 4);
for (int i = 0; i < count; i++) {
+ String text = this.model.getLabel(this, i);
Object object = this.spinners[i].getLabel();
if (object instanceof JRadioButton) {
JRadioButton button = (JRadioButton) object;
- button.setText(this.model.getLabel(this, i));
+ button.setText(text);
+ button.getAccessibleContext().setAccessibleDescription(text);
}
else if (object instanceof JLabel) {
JLabel label = (JLabel) object;
- label.setText(this.model.getLabel(this, i));
+ label.setText(text);
}
this.spinners[i].setRange(this.model.getMinimum(i), this.model.getMaximum(i));
this.spinners[i].setValue(this.values[i]);
+ this.spinners[i].getSlider().getAccessibleContext().setAccessibleName(text);
+ this.spinners[i].getSpinner().getAccessibleContext().setAccessibleName(text);
+ DefaultEditor editor = (DefaultEditor) this.spinners[i].getSpinner().getEditor();
+ editor.getTextField().getAccessibleContext().setAccessibleName(text);
+ this.spinners[i].getSlider().getAccessibleContext().setAccessibleDescription(text);
+ this.spinners[i].getSpinner().getAccessibleContext().setAccessibleDescription(text);
+ editor.getTextField().getAccessibleContext().setAccessibleDescription(text);
}
}
--- a/jdk/src/share/classes/javax/swing/plaf/basic/BasicColorChooserUI.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/swing/plaf/basic/BasicColorChooserUI.java Mon Dec 17 08:30:06 2012 -0500
@@ -94,6 +94,7 @@
tabbedPane = new JTabbedPane();
tabbedPane.setName("ColorChooser.tabPane");
tabbedPane.setInheritsPopupMenu(true);
+ tabbedPane.getAccessibleContext().setAccessibleDescription(tabbedPane.getName());
singlePanel = new JPanel(new CenterLayout());
singlePanel.setName("ColorChooser.panel");
singlePanel.setInheritsPopupMenu(true);
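Illustrative sketch (not part of the changeset): the color-chooser changes above all follow one pattern, namely giving each control an accessible description (or name) through its AccessibleContext so assistive technology can announce it. A minimal stand-alone example of that call on a plain JButton:

import javax.accessibility.AccessibleContext;
import javax.swing.JButton;

public class AccessibleDescriptionDemo {
    public static void main(String[] args) {
        JButton ok = new JButton("OK");

        // The same call the patch adds for the OK/Cancel/Reset buttons and the tab pane.
        AccessibleContext ctx = ok.getAccessibleContext();
        ctx.setAccessibleDescription("OK");

        System.out.println(ctx.getAccessibleDescription());   // prints: OK
    }
}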
--- a/jdk/src/share/classes/javax/swing/plaf/basic/BasicTreeUI.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/javax/swing/plaf/basic/BasicTreeUI.java Mon Dec 17 08:30:06 2012 -0500
@@ -1941,6 +1941,9 @@
for(int counter = beginRow + 1; counter <= endRow; counter++) {
testRect = getPathBounds(tree,
getPathForRow(tree, counter));
+ if (testRect == null) {
+ return;
+ }
if((testRect.y + testRect.height) > maxY)
counter = endRow;
}
@@ -2069,7 +2072,7 @@
treeState.invalidatePathBounds(oldPath);
updateSize();
}
- else {
+ else if (editingBounds != null) {
editingBounds.x = 0;
editingBounds.width = tree.getSize().width;
tree.repaint(editingBounds);
@@ -2114,6 +2117,9 @@
tree.isPathSelected(path), tree.isExpanded(path),
treeModel.isLeaf(path.getLastPathComponent()), row);
Rectangle nodeBounds = getPathBounds(tree, path);
+ if (nodeBounds == null) {
+ return false;
+ }
editingRow = row;
@@ -2134,6 +2140,9 @@
// To make sure x/y are updated correctly, fetch
// the bounds again.
nodeBounds = getPathBounds(tree, path);
+ if (nodeBounds == null) {
+ return false;
+ }
}
else
editorHasDifferentSize = false;
@@ -3570,7 +3579,7 @@
if(pressedPath != null) {
Rectangle bounds = getPathBounds(tree, pressedPath);
- if(e.getY() >= (bounds.y + bounds.height)) {
+ if (bounds == null || e.getY() >= (bounds.y + bounds.height)) {
return;
}
@@ -3832,6 +3841,10 @@
// And repaint
Rectangle newMinBounds = getPathBounds(tree, minPath);
+ if (minBounds == null || newMinBounds == null) {
+ return;
+ }
+
if (indices.length == 1 &&
newMinBounds.height == minBounds.height) {
tree.repaint(0, minBounds.y, tree.getWidth(),
@@ -4466,27 +4479,28 @@
}
}
Rectangle newRect = ui.getPathBounds(tree, newPath);
-
- newRect.x = visRect.x;
- newRect.width = visRect.width;
- if(direction == -1) {
- newRect.height = visRect.height;
- }
- else {
- newRect.y -= (visRect.height - newRect.height);
- newRect.height = visRect.height;
+ if (newRect != null) {
+ newRect.x = visRect.x;
+ newRect.width = visRect.width;
+ if(direction == -1) {
+ newRect.height = visRect.height;
+ }
+ else {
+ newRect.y -= (visRect.height - newRect.height);
+ newRect.height = visRect.height;
+ }
+
+ if(addToSelection) {
+ ui.extendSelection(newPath);
+ }
+ else if(changeSelection) {
+ tree.setSelectionPath(newPath);
+ }
+ else {
+ ui.setLeadSelectionPath(newPath, true);
+ }
+ tree.scrollRectToVisible(newRect);
}
-
- if(addToSelection) {
- ui.extendSelection(newPath);
- }
- else if(changeSelection) {
- tree.setSelectionPath(newPath);
- }
- else {
- ui.setLeadSelectionPath(newPath, true);
- }
- tree.scrollRectToVisible(newRect);
}
}
--- a/jdk/src/share/classes/sun/awt/TextureSizeConstraining.java Mon Dec 17 08:28:27 2012 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package sun.awt;
-
-/**
- * A GraphicsConfiguration implements the TextureSizeConstraining
- * interface to indicate that it imposes certain limitations on the
- * maximum size of supported textures.
- */
-public interface TextureSizeConstraining {
-
- /**
- * Returns the maximum width of any texture image.
- */
- public int getMaxTextureWidth();
-
- /**
- * Returns the maximum height of any texture image.
- */
- public int getMaxTextureHeight();
-
-}
--- a/jdk/src/share/classes/sun/java2d/opengl/OGLBlitLoops.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/java2d/opengl/OGLBlitLoops.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,7 @@
package sun.java2d.opengl;
+import java.awt.AlphaComposite;
import java.awt.Composite;
import java.awt.Transparency;
import java.awt.geom.AffineTransform;
@@ -99,6 +100,8 @@
CompositeType.AnyAlpha,
blitIntArgbPreToSurface),
+ new OGLAnyCompositeBlit(OGLSurfaceData.OpenGLSurface),
+
new OGLSwToSurfaceScale(SurfaceType.IntRgb,
OGLSurfaceData.PF_INT_RGB),
new OGLSwToSurfaceScale(SurfaceType.IntRgbx,
@@ -175,6 +178,9 @@
new OGLGeneralBlit(OGLSurfaceData.OpenGLTexture,
CompositeType.SrcNoEa,
blitIntArgbPreToTexture),
+
+ new OGLAnyCompositeBlit(OGLSurfaceData.OpenGLTexture),
+
};
GraphicsPrimitiveMgr.register(primitives);
}
@@ -763,3 +769,49 @@
}
}
}
+
+class OGLAnyCompositeBlit extends Blit {
+ private WeakReference<SurfaceData> dstTmp;
+
+ public OGLAnyCompositeBlit(SurfaceType dstType) {
+ super(SurfaceType.Any, CompositeType.Any, dstType);
+ }
+ public synchronized void Blit(SurfaceData src, SurfaceData dst,
+ Composite comp, Region clip,
+ int sx, int sy, int dx, int dy,
+ int w, int h)
+ {
+ Blit convertdst = Blit.getFromCache(dst.getSurfaceType(),
+ CompositeType.SrcNoEa,
+ SurfaceType.IntArgbPre);
+
+ SurfaceData cachedDst = null;
+
+ if (dstTmp != null) {
+ // use cached intermediate surface, if available
+ cachedDst = dstTmp.get();
+ }
+
+ // convert source to IntArgbPre
+ SurfaceData dstBuffer = convertFrom(convertdst, dst, dx, dy, w, h,
+ cachedDst, BufferedImage.TYPE_INT_ARGB_PRE);
+
+ Blit performop = Blit.getFromCache(src.getSurfaceType(),
+ CompositeType.Any, dstBuffer.getSurfaceType());
+
+ performop.Blit(src, dstBuffer, comp, clip,
+ sx, sy, 0, 0, w, h);
+
+ if (dstBuffer != cachedDst) {
+ // cache the intermediate surface
+ dstTmp = new WeakReference<>(dstBuffer);
+ }
+
+ // now blit the buffer back to the destination
+ convertdst = Blit.getFromCache(dstBuffer.getSurfaceType(),
+ CompositeType.SrcNoEa,
+ dst.getSurfaceType());
+ convertdst.Blit(dstBuffer, dst, AlphaComposite.Src,
+ clip, 0, 0, dx, dy, w, h);
+ }
+}
--- a/jdk/src/share/classes/sun/java2d/opengl/OGLSurfaceDataProxy.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/java2d/opengl/OGLSurfaceDataProxy.java Mon Dec 17 08:30:06 2012 -0500
@@ -76,6 +76,7 @@
CompositeType comp,
Color bgColor)
{
- return (bgColor == null || transparency == Transparency.OPAQUE);
+ return comp.isDerivedFrom(CompositeType.AnyAlpha) &&
+ (bgColor == null || transparency == Transparency.OPAQUE);
}
}
--- a/jdk/src/share/classes/sun/net/www/MessageHeader.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/net/www/MessageHeader.java Mon Dec 17 08:30:06 2012 -0500
@@ -137,6 +137,43 @@
return null;
}
+ /**
+ * Removes bare "Negotiate" and "Kerberos" headers when an "NTLM ..."
+ * challenge appears. All filtering is performed on headers whose key is
+ * {@code k}.
+ * @param k the header key to filter on
+ * @return true if there is a change
+ */
+ public boolean filterNTLMResponses(String k) {
+ boolean found = false;
+ for (int i=0; i<nkeys; i++) {
+ if (k.equalsIgnoreCase(keys[i])
+ && values[i] != null && values[i].length() > 5
+ && values[i].substring(0, 5).equalsIgnoreCase("NTLM ")) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ int j = 0;
+ for (int i=0; i<nkeys; i++) {
+ if (k.equalsIgnoreCase(keys[i]) && (
+ "Negotiate".equalsIgnoreCase(values[i]) ||
+ "Kerberos".equalsIgnoreCase(values[i]))) {
+ continue;
+ }
+ if (i != j) {
+ keys[j] = keys[i];
+ values[j] = values[i];
+ }
+ j++;
+ }
+ if (j != nkeys) {
+ nkeys = j;
+ return true;
+ }
+ }
+ return false;
+ }
+
class HeaderIterator implements Iterator<String> {
int index = 0;
int next = -1;
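Illustrative sketch (not part of the changeset): how the new filter behaves, written against the internal sun.net.www.MessageHeader API (accessible on JDK 8, though not a supported interface). The header values are made up; the point is that once an "NTLM ..." challenge is present for a key, the bare Negotiate and Kerberos entries under that key are dropped.

import sun.net.www.MessageHeader;

public class NtlmFilterDemo {
    public static void main(String[] args) {
        MessageHeader responses = new MessageHeader();
        responses.add("WWW-Authenticate", "Negotiate");
        responses.add("WWW-Authenticate", "Kerberos");
        responses.add("WWW-Authenticate", "NTLM TlRMTVNTUAAB...");   // hypothetical challenge

        boolean changed = responses.filterNTLMResponses("WWW-Authenticate");
        System.out.println(changed);       // true: bare Negotiate/Kerberos removed
        System.out.println(responses);     // only the NTLM challenge remains
    }
}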
--- a/jdk/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/net/www/protocol/http/HttpURLConnection.java Mon Dec 17 08:30:06 2012 -0500
@@ -1326,6 +1326,16 @@
if (logger.isLoggable(PlatformLogger.FINE)) {
logger.fine(responses.toString());
}
+
+ boolean b1 = responses.filterNTLMResponses("WWW-Authenticate");
+ boolean b2 = responses.filterNTLMResponses("Proxy-Authenticate");
+ if (b1 || b2) {
+ if (logger.isLoggable(PlatformLogger.FINE)) {
+ logger.fine(">>>> Headers are filtered");
+ logger.fine(responses.toString());
+ }
+ }
+
inputStream = http.getInputStream();
respCode = getResponseCode();
@@ -1784,6 +1794,13 @@
logger.fine(responses.toString());
}
+ if (responses.filterNTLMResponses("Proxy-Authenticate")) {
+ if (logger.isLoggable(PlatformLogger.FINE)) {
+ logger.fine(">>>> Headers are filtered");
+ logger.fine(responses.toString());
+ }
+ }
+
statusLine = responses.getValue(0);
StringTokenizer st = new StringTokenizer(statusLine);
st.nextToken();
--- a/jdk/src/share/classes/sun/print/PathGraphics.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/print/PathGraphics.java Mon Dec 17 08:30:06 2012 -0500
@@ -1025,7 +1025,8 @@
continue;
}
glyph = font2D.charToGlyph(c);
- if (glyph != missingGlyph && glyph < numGlyphs &&
+ if (glyph != missingGlyph &&
+ glyph >= 0 && glyph < numGlyphs &&
(glyphToCharMap[glyph] ==
CharToGlyphMapper.INVISIBLE_GLYPH_ID)) {
glyphToCharMap[glyph] = c;
--- a/jdk/src/share/classes/sun/security/jgss/krb5/Krb5Util.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/security/jgss/krb5/Krb5Util.java Mon Dec 17 08:30:06 2012 -0500
@@ -40,10 +40,7 @@
import sun.security.krb5.KrbException;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Iterator;
import java.util.List;
-import java.util.Objects;
-import java.util.Set;
import sun.security.krb5.KerberosSecrets;
import sun.security.krb5.PrincipalName;
/**
@@ -189,18 +186,6 @@
return subject;
}
- // A special KerberosKey, used as keys read from a KeyTab object.
- // Each time new keys are read from KeyTab objects in the private
- // credentials set, old ones are removed and new ones added.
- public static class KeysFromKeyTab extends KerberosKey {
- private static final long serialVersionUID = 8238092170252746927L;
-
- public KeysFromKeyTab(KerberosKey key) {
- super(key.getPrincipal(), key.getEncoded(),
- key.getKeyType(), key.getVersionNumber());
- }
- }
-
/**
* Credentials of a service, the private secret to authenticate its
* identity, which can be:
@@ -239,7 +224,7 @@
// Compatibility with old behavior: even when there is no
// KerberosPrincipal, we can find one from KerberosKeys
List<KerberosKey> keys = SubjectComber.findMany(
- subj, null, null, KerberosKey.class);
+ subj, serverPrincipal, null, KerberosKey.class);
if (!keys.isEmpty()) {
sc.kp = keys.get(0).getPrincipal();
serverPrincipal = sc.kp.getName();
@@ -255,9 +240,9 @@
subj, null, null, KeyTab.class);
sc.kk = SubjectComber.findMany(
subj, serverPrincipal, null, KerberosKey.class);
- sc.tgt = SubjectComber.find(subj, null, null, KerberosTicket.class);
-
- if (sc.ktabs.isEmpty() && sc.kk.isEmpty()) {
+ sc.tgt = SubjectComber.find(
+ subj, null, serverPrincipal, KerberosTicket.class);
+ if (sc.ktabs.isEmpty() && sc.kk.isEmpty() && sc.tgt == null) {
return null;
}
return sc;
@@ -268,37 +253,16 @@
}
public KerberosKey[] getKKeys() {
- if (ktabs.isEmpty()) {
- return kk.toArray(new KerberosKey[kk.size()]);
- } else {
- List<KerberosKey> keys = new ArrayList<>();
- for (KeyTab ktab: ktabs) {
- for (KerberosKey k: ktab.getKeys(kp)) {
- keys.add(k);
- }
+ List<KerberosKey> keys = new ArrayList<>();
+ for (KerberosKey k: kk) {
+ keys.add(k);
+ }
+ for (KeyTab ktab: ktabs) {
+ for (KerberosKey k: ktab.getKeys(kp)) {
+ keys.add(k);
}
- // Compatibility: also add keys to privCredSet. Remove old
- // ones first, only remove those from keytab.
- if (!subj.isReadOnly()) {
- Set<Object> pcs = subj.getPrivateCredentials();
- synchronized (pcs) {
- Iterator<Object> iterator = pcs.iterator();
- while (iterator.hasNext()) {
- Object obj = iterator.next();
- if (obj instanceof KeysFromKeyTab) {
- KerberosKey key = (KerberosKey)obj;
- if (Objects.equals(key.getPrincipal(), kp)) {
- iterator.remove();
- }
- }
- }
- }
- for (KerberosKey key: keys) {
- subj.getPrivateCredentials().add(new KeysFromKeyTab(key));
- }
- }
- return keys.toArray(new KerberosKey[keys.size()]);
}
+ return keys.toArray(new KerberosKey[keys.size()]);
}
public EncryptionKey[] getEKeys() {
--- a/jdk/src/share/classes/sun/security/jgss/wrapper/SunNativeProvider.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/security/jgss/wrapper/SunNativeProvider.java Mon Dec 17 08:30:06 2012 -0500
@@ -90,10 +90,6 @@
"libgssapi_krb5.so",
"libgssapi_krb5.so.2",
};
- } else if (osname.contains("OS X")) {
- gssLibs = new String[]{
- "/usr/lib/sasl2/libgssapiv2.2.so",
- };
}
} else {
gssLibs = new String[]{ defaultLib };
--- a/jdk/src/share/classes/sun/security/krb5/EncryptionKey.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/security/krb5/EncryptionKey.java Mon Dec 17 08:30:06 2012 -0500
@@ -555,6 +555,12 @@
int ktype;
boolean etypeFound = false;
+
+ // When no matched kvno is found, returns the key of the same
+ // etype with the highest kvno
+ int kvno_found = 0;
+ EncryptionKey key_found = null;
+
for (int i = 0; i < keys.length; i++) {
ktype = keys[i].getEType();
if (EType.isSupported(ktype)) {
@@ -563,6 +569,10 @@
etypeFound = true;
if (versionMatches(kvno, kv)) {
return keys[i];
+ } else if (kv > kvno_found) {
+ // kv is not null
+ key_found = keys[i];
+ kvno_found = kv;
}
}
}
@@ -580,12 +590,17 @@
etypeFound = true;
if (versionMatches(kvno, kv)) {
return new EncryptionKey(etype, keys[i].getBytes());
+ } else if (kv > kvno_found) {
+ key_found = new EncryptionKey(etype, keys[i].getBytes());
+ kvno_found = kv;
}
}
}
}
if (etypeFound) {
- throw new KrbException(Krb5.KRB_AP_ERR_BADKEYVER);
+ return key_found;
+ // For compatibility, will not fail here.
+ //throw new KrbException(Krb5.KRB_AP_ERR_BADKEYVER);
}
return null;
}
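Illustrative sketch (not part of the changeset): a stand-alone restatement of the selection rule introduced above, which prefers an exact kvno match and otherwise returns the highest-kvno key of the requested etype instead of throwing KRB_AP_ERR_BADKEYVER. This is not the internal sun.security.krb5 API; the Key class is a hypothetical stand-in for EncryptionKey.

public class KvnoFallbackDemo {

    static final class Key {                          // hypothetical stand-in for EncryptionKey
        final int etype, kvno;
        Key(int etype, int kvno) { this.etype = etype; this.kvno = kvno; }
        public String toString() { return "etype=" + etype + ",kvno=" + kvno; }
    }

    static Key pick(Key[] keys, int etype, Integer wantedKvno) {
        Key best = null;
        for (Key k : keys) {
            if (k.etype != etype) {
                continue;                             // only consider the requested etype
            }
            if (wantedKvno != null && k.kvno == wantedKvno) {
                return k;                             // exact kvno match wins immediately
            }
            if (best == null || k.kvno > best.kvno) {
                best = k;                             // remember the highest kvno seen so far
            }
        }
        return best;                                  // may be null if the etype is absent
    }

    public static void main(String[] args) {
        Key[] keytab = { new Key(18, 3), new Key(18, 5), new Key(17, 6) };
        System.out.println(pick(keytab, 18, 4));      // no kvno 4 -> etype=18,kvno=5
    }
}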
--- a/jdk/src/share/classes/sun/swing/plaf/synth/SynthFileChooserUI.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/swing/plaf/synth/SynthFileChooserUI.java Mon Dec 17 08:30:06 2012 -0500
@@ -256,6 +256,7 @@
if (getFileChooser().getControlButtonsAreShown()) {
approveButton.setText(getApproveButtonText(getFileChooser()));
approveButton.setToolTipText(getApproveButtonToolTipText(getFileChooser()));
+ approveButton.setMnemonic(getApproveButtonMnemonic(getFileChooser()));
}
}
--- a/jdk/src/share/classes/sun/text/resources/FormatData.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/FormatData.java Mon Dec 17 08:30:06 2012 -0500
@@ -50,6 +50,20 @@
* Overrides ListResourceBundle
*/
protected final Object[][] getContents() {
+ final String[] buddhistEras = new String[] { // Thai Buddhist calendar era strings
+ "BC", // BC
+ "B.E." // Buddhist Era
+ };
+
+ // Japanese imperial calendar era abbreviations
+ final String[] japaneseEraAbbrs = new String[] {
+ "",
+ "M",
+ "T",
+ "S",
+ "H",
+ };
+
return new Object[][] {
{ "MonthNames",
new String[] {
@@ -107,29 +121,49 @@
"Sat" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "T",
+ "W",
+ "T",
+ "F",
+ "S",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"AM", // am marker
"PM" // pm marker
}
},
+ { "narrow.AmPmMarkers",
+ new String[] {
+ "a", // am marker
+ "p" // pm marker
+ }
+ },
{ "Eras",
new String[] { // era strings for GregorianCalendar
"BC",
"AD"
}
},
- { "buddhist.Eras",
- new String[] { // Thai Buddhist calendar era strings
- "BC", // BC
- "B.E." // Buddhist Era
+ { "narrow.Eras",
+ new String[] {
+ "B",
+ "A",
}
},
+ { "buddhist.Eras",
+ buddhistEras
+ },
{ "buddhist.short.Eras",
- new String[] { // Thai Buddhist calendar era strings
- "BC", // BC
- "B.E." // Buddhist Era
- }
+ buddhistEras
+ },
+ { "buddhist.narrow.Eras",
+ buddhistEras
},
{ "japanese.Eras",
new String[] { // Japanese imperial calendar era strings
@@ -141,13 +175,10 @@
}
},
{ "japanese.short.Eras",
- new String[] { // Japanese imperial calendar era abbreviations
- "",
- "M",
- "T",
- "S",
- "H",
- }
+ japaneseEraAbbrs
+ },
+ { "japanese.narrow.Eras",
+ japaneseEraAbbrs
},
{ "japanese.FirstYear",
new String[] { // Japanese imperial calendar year name
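Illustrative sketch (not part of the changeset): the new DayNarrows / narrow.Eras / narrow.AmPmMarkers resources back the narrow styles that JDK 8 exposes through java.util.Calendar (Calendar.NARROW_FORMAT and NARROW_STANDALONE). The expected output assumes the JRE locale data shown above for Locale.US.

import java.util.Calendar;
import java.util.Locale;

public class NarrowNamesDemo {
    public static void main(String[] args) {
        Calendar cal = Calendar.getInstance(Locale.US);
        cal.set(2012, Calendar.DECEMBER, 17);                  // a Monday

        String day = cal.getDisplayName(
                Calendar.DAY_OF_WEEK, Calendar.NARROW_FORMAT, Locale.US);
        String era = cal.getDisplayName(
                Calendar.ERA, Calendar.NARROW_FORMAT, Locale.US);

        System.out.println(day + " / " + era);                 // expected: M / A
    }
}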
--- a/jdk/src/share/classes/sun/text/resources/ar/FormatData_ar.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ar/FormatData_ar.java Mon Dec 17 08:30:06 2012 -0500
@@ -107,6 +107,17 @@
"\u0633" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u062d",
+ "\u0646",
+ "\u062b",
+ "\u0631",
+ "\u062e",
+ "\u062c",
+ "\u0633",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\u0635", // am marker
--- a/jdk/src/share/classes/sun/text/resources/be/FormatData_be.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/be/FormatData_be.java Mon Dec 17 08:30:06 2012 -0500
@@ -85,6 +85,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "\u0441",
+ "\u043b",
+ "\u0441",
+ "\u043a",
+ "\u043c",
+ "\u0447",
+ "\u043b",
+ "\u0436",
+ "\u0432",
+ "\u043a",
+ "\u043b",
+ "\u0441",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"\u043d\u044f\u0434\u0437\u0435\u043b\u044f", // Sunday
@@ -107,6 +124,17 @@
"\u0441\u0431" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u043d",
+ "\u043f",
+ "\u0430",
+ "\u0441",
+ "\u0447",
+ "\u043f",
+ "\u0441",
+ }
+ },
{ "Eras",
new String[] { // era strings
"\u0434\u0430 \u043d.\u0435.",
--- a/jdk/src/share/classes/sun/text/resources/bg/FormatData_bg.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/bg/FormatData_bg.java Mon Dec 17 08:30:06 2012 -0500
@@ -107,6 +107,17 @@
"\u0421\u0431" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u043d",
+ "\u043f",
+ "\u0432",
+ "\u0441",
+ "\u0447",
+ "\u043f",
+ "\u0441",
+ }
+ },
{ "Eras",
new String[] { // era strings
"\u043f\u0440.\u043d.\u0435.",
--- a/jdk/src/share/classes/sun/text/resources/ca/FormatData_ca.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ca/FormatData_ca.java Mon Dec 17 08:30:06 2012 -0500
@@ -119,6 +119,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "g",
+ "f",
+ "m",
+ "a",
+ "m",
+ "j",
+ "j",
+ "a",
+ "s",
+ "o",
+ "n",
+ "d",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"diumenge", // Sunday
@@ -141,6 +158,28 @@
"ds." // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "G",
+ "L", // Note: contributed item in CDLR
+ "T",
+ "C",
+ "J",
+ "V",
+ "S",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "g",
+ "l",
+ "t",
+ "c",
+ "j",
+ "v",
+ "s",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal separator
--- a/jdk/src/share/classes/sun/text/resources/cs/FormatData_cs.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/cs/FormatData_cs.java Mon Dec 17 08:30:06 2012 -0500
@@ -141,6 +141,17 @@
"So" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "N",
+ "P",
+ "\u00da",
+ "S",
+ "\u010c",
+ "P",
+ "S",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"dop.", // am marker
--- a/jdk/src/share/classes/sun/text/resources/da/FormatData_da.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/da/FormatData_da.java Mon Dec 17 08:30:06 2012 -0500
@@ -124,6 +124,17 @@
"l\u00f8" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "T",
+ "O",
+ "T",
+ "F",
+ "L",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal separator
--- a/jdk/src/share/classes/sun/text/resources/de/FormatData_de.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/de/FormatData_de.java Mon Dec 17 08:30:06 2012 -0500
@@ -124,6 +124,17 @@
"Sa" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "D",
+ "M",
+ "D",
+ "F",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"v. Chr.",
--- a/jdk/src/share/classes/sun/text/resources/el/FormatData_el.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/el/FormatData_el.java Mon Dec 17 08:30:06 2012 -0500
@@ -124,6 +124,17 @@
"\u03a3\u03b1\u03b2" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u039a",
+ "\u0394",
+ "\u03a4",
+ "\u03a4",
+ "\u03a0",
+ "\u03a0",
+ "\u03a3",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\u03c0\u03bc", // am marker
--- a/jdk/src/share/classes/sun/text/resources/es/FormatData_es.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/es/FormatData_es.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"s\u00e1b" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "D",
+ "L",
+ "M",
+ "X",
+ "J",
+ "V",
+ "S",
+ }
+ },
{ "NumberPatterns",
new String[] {
"#,##0.###;-#,##0.###", // decimal pattern
--- a/jdk/src/share/classes/sun/text/resources/et/FormatData_et.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/et/FormatData_et.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"L" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "P",
+ "E",
+ "T",
+ "K",
+ "N",
+ "R",
+ "L",
+ }
+ },
{ "Eras",
new String[] { // era strings
"e.m.a.",
--- a/jdk/src/share/classes/sun/text/resources/fi/FormatData_fi.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/fi/FormatData_fi.java Mon Dec 17 08:30:06 2012 -0500
@@ -116,6 +116,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "T",
+ "H",
+ "M",
+ "H",
+ "T",
+ "K",
+ "H",
+ "E",
+ "S",
+ "L",
+ "M",
+ "J",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"sunnuntai", // Sunday
@@ -138,6 +155,28 @@
"la" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "T",
+ "K",
+ "T",
+ "P",
+ "L",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "T",
+ "K",
+ "T",
+ "P",
+ "L",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal separator
@@ -181,6 +220,12 @@
"ip." // pm marker
}
},
+ { "narrow.AmPmMarkers",
+ new String[] {
+ "ap.",
+ "ip.",
+ }
+ },
};
}
}
--- a/jdk/src/share/classes/sun/text/resources/fr/FormatData_fr.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/fr/FormatData_fr.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"sam." // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "D",
+ "L",
+ "M",
+ "M",
+ "J",
+ "V",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"BC",
--- a/jdk/src/share/classes/sun/text/resources/hi/FormatData_hi_IN.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/hi/FormatData_hi_IN.java Mon Dec 17 08:30:06 2012 -0500
@@ -99,6 +99,17 @@
"\u0936\u0928\u093f" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u0930",
+ "\u0938\u094b",
+ "\u092e\u0902",
+ "\u092c\u0941",
+ "\u0917\u0941",
+ "\u0936\u0941",
+ "\u0936",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\u092a\u0942\u0930\u094d\u0935\u093e\u0939\u094d\u0928", // am marker
--- a/jdk/src/share/classes/sun/text/resources/hr/FormatData_hr.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/hr/FormatData_hr.java Mon Dec 17 08:30:06 2012 -0500
@@ -116,6 +116,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "1.",
+ "2.",
+ "3.",
+ "4.",
+ "5.",
+ "6.",
+ "7.",
+ "8.",
+ "9.",
+ "10.",
+ "11.",
+ "12.",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"nedjelja", // Sunday
@@ -138,6 +155,28 @@
"sub" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "N",
+ "P",
+ "U",
+ "S",
+ "\u010c",
+ "P",
+ "S",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "n",
+ "p",
+ "u",
+ "s",
+ "\u010d",
+ "p",
+ "s",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal separator
--- a/jdk/src/share/classes/sun/text/resources/hu/FormatData_hu.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/hu/FormatData_hu.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"Szo" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "V",
+ "H",
+ "K",
+ "Sz",
+ "Cs",
+ "P",
+ "Sz",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"DE", // am marker
--- a/jdk/src/share/classes/sun/text/resources/is/FormatData_is.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/is/FormatData_is.java Mon Dec 17 08:30:06 2012 -0500
@@ -82,6 +82,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "j",
+ "f",
+ "m",
+ "a",
+ "m",
+ "j",
+ "j",
+ "\u00e1",
+ "s",
+ "o",
+ "n",
+ "d",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"sunnudagur", // Sunday
@@ -104,6 +121,28 @@
"lau." // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "\u00de",
+ "M",
+ "F",
+ "F",
+ "L",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "s",
+ "m",
+ "\u00fe",
+ "m",
+ "f",
+ "f",
+ "l",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal separator
--- a/jdk/src/share/classes/sun/text/resources/it/FormatData_it.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/it/FormatData_it.java Mon Dec 17 08:30:06 2012 -0500
@@ -121,6 +121,17 @@
"sab" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "D",
+ "L",
+ "M",
+ "M",
+ "G",
+ "V",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"BC",
--- a/jdk/src/share/classes/sun/text/resources/iw/FormatData_iw.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/iw/FormatData_iw.java Mon Dec 17 08:30:06 2012 -0500
@@ -121,6 +121,28 @@
"\u05e9" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u05d0",
+ "\u05d1",
+ "\u05d2",
+ "\u05d3",
+ "\u05d4",
+ "\u05d5",
+ "\u05e9",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "\u05d0",
+ "\u05d1",
+ "\u05d2",
+ "\u05d3",
+ "\u05d4",
+ "\u05d5",
+ "\u05e9",
+ }
+ },
{ "Eras",
new String[] { // era strings
"\u05dc\u05e1\u05d4\"\u05e0",
--- a/jdk/src/share/classes/sun/text/resources/ja/FormatData_ja.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ja/FormatData_ja.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"\u571f" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u65e5",
+ "\u6708",
+ "\u706b",
+ "\u6c34",
+ "\u6728",
+ "\u91d1",
+ "\u571f",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\u5348\u524d", // am marker
--- a/jdk/src/share/classes/sun/text/resources/ko/FormatData_ko.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ko/FormatData_ko.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"\ud1a0" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\uc77c",
+ "\uc6d4",
+ "\ud654",
+ "\uc218",
+ "\ubaa9",
+ "\uae08",
+ "\ud1a0",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\uc624\uc804", // am marker
--- a/jdk/src/share/classes/sun/text/resources/lt/FormatData_lt.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/lt/FormatData_lt.java Mon Dec 17 08:30:06 2012 -0500
@@ -99,6 +99,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "S",
+ "V",
+ "K",
+ "B",
+ "G",
+ "B",
+ "L",
+ "R",
+ "R",
+ "S",
+ "L",
+ "G",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"Sekmadienis", // Sunday
@@ -121,6 +138,28 @@
"\u0160t" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "P",
+ "A",
+ "T",
+ "K",
+ "P",
+ "\u0160",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "S",
+ "P",
+ "A",
+ "T",
+ "K",
+ "P",
+ "\u0160",
+ }
+ },
{ "Eras",
new String[] { // era strings
"pr.Kr.",
--- a/jdk/src/share/classes/sun/text/resources/lv/FormatData_lv.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/lv/FormatData_lv.java Mon Dec 17 08:30:06 2012 -0500
@@ -121,6 +121,17 @@
"S" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "P",
+ "O",
+ "T",
+ "C",
+ "P",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"pm\u0113",
--- a/jdk/src/share/classes/sun/text/resources/mk/FormatData_mk.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/mk/FormatData_mk.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"\u0441\u0430\u0431." // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u043d",
+ "\u043f",
+ "\u0432",
+ "\u0441",
+ "\u0447",
+ "\u043f",
+ "\u0441",
+ }
+ },
{ "Eras",
new String[] { // era strings
"\u043f\u0440.\u043d.\u0435.",
--- a/jdk/src/share/classes/sun/text/resources/ms/FormatData_ms.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ms/FormatData_ms.java Mon Dec 17 08:30:06 2012 -0500
@@ -81,6 +81,23 @@
"",
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "J",
+ "F",
+ "M",
+ "A",
+ "M",
+ "J",
+ "J",
+ "O",
+ "S",
+ "O",
+ "N",
+ "D",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"Ahad",
@@ -103,6 +120,28 @@
"Sab",
}
},
+ { "DayNarrows",
+ new String[] {
+ "A",
+ "I",
+ "S",
+ "R",
+ "K",
+ "J",
+ "S",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "A",
+ "I",
+ "S",
+ "R",
+ "K",
+ "J",
+ "S",
+ }
+ },
{ "Eras",
new String[] {
"BCE",
--- a/jdk/src/share/classes/sun/text/resources/mt/FormatData_mt.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/mt/FormatData_mt.java Mon Dec 17 08:30:06 2012 -0500
@@ -103,6 +103,17 @@
"Sib",
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u0126",
+ "T",
+ "T",
+ "E",
+ "\u0126",
+ "\u0120",
+ "S",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"QN",
--- a/jdk/src/share/classes/sun/text/resources/nl/FormatData_nl.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/nl/FormatData_nl.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"za" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "Z",
+ "M",
+ "D",
+ "W",
+ "D",
+ "V",
+ "Z",
+ }
+ },
{ "Eras",
new String[] { // era strings for GregorianCalendar
"v. Chr.",
--- a/jdk/src/share/classes/sun/text/resources/pl/FormatData_pl.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/pl/FormatData_pl.java Mon Dec 17 08:30:06 2012 -0500
@@ -121,6 +121,17 @@
"So" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "N",
+ "P",
+ "W",
+ "\u015a",
+ "C",
+ "P",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"p.n.e.",
--- a/jdk/src/share/classes/sun/text/resources/pt/FormatData_pt.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/pt/FormatData_pt.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"S\u00e1b" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "D",
+ "S",
+ "T",
+ "Q",
+ "Q",
+ "S",
+ "S",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal al separator
--- a/jdk/src/share/classes/sun/text/resources/ro/FormatData_ro.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ro/FormatData_ro.java Mon Dec 17 08:30:06 2012 -0500
@@ -82,6 +82,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "I",
+ "F",
+ "M",
+ "A",
+ "M",
+ "I",
+ "I",
+ "A",
+ "S",
+ "O",
+ "N",
+ "D",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"duminic\u0103", // Sunday
@@ -104,6 +121,29 @@
"S" // abb Saturday
}
},
+ // commented out DayNarrows because most names are contributed.
+// { "DayNarrows",
+// new String[] {
+// "D",
+// "",
+// "",
+// "",
+// "",
+// "",
+// "",
+// }
+// },
+ { "standalone.DayNarrows",
+ new String[] {
+ "D",
+ "L",
+ "M",
+ "M",
+ "J",
+ "V",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"d.C.",
--- a/jdk/src/share/classes/sun/text/resources/ru/FormatData_ru.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/ru/FormatData_ru.java Mon Dec 17 08:30:06 2012 -0500
@@ -138,6 +138,28 @@
"\u0421\u0431" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u0412",
+ "\u041f\u043d",
+ "\u0412\u0442",
+ "\u0421",
+ "\u0427",
+ "\u041f",
+ "\u0421", // contributed item in CLDR
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "\u0412",
+ "\u041f",
+ "\u0412",
+ "\u0421",
+ "\u0427",
+ "\u041f",
+ "\u0421",
+ }
+ },
{ "Eras",
new String[] { // era strings
"\u0434\u043e \u043d.\u044d.",
--- a/jdk/src/share/classes/sun/text/resources/sk/FormatData_sk.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/sk/FormatData_sk.java Mon Dec 17 08:30:06 2012 -0500
@@ -138,6 +138,17 @@
"So" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "N",
+ "P",
+ "U",
+ "S",
+ "\u0160",
+ "P",
+ "S",
+ }
+ },
{ "Eras",
new String[] { // era strings
"pred n.l.",
--- a/jdk/src/share/classes/sun/text/resources/sl/FormatData_sl.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/sl/FormatData_sl.java Mon Dec 17 08:30:06 2012 -0500
@@ -121,6 +121,17 @@
"Sob" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "n",
+ "p",
+ "t",
+ "s",
+ "\u010d",
+ "p",
+ "s",
+ }
+ },
{ "Eras",
new String[] { // era strings
"pr.n.\u0161.",
--- a/jdk/src/share/classes/sun/text/resources/sq/FormatData_sq.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/sq/FormatData_sq.java Mon Dec 17 08:30:06 2012 -0500
@@ -104,6 +104,17 @@
"Sht" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "D",
+ "H",
+ "M",
+ "M",
+ "E",
+ "P",
+ "S",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"PD", // am marker
--- a/jdk/src/share/classes/sun/text/resources/sr/FormatData_sr.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/sr/FormatData_sr.java Mon Dec 17 08:30:06 2012 -0500
@@ -103,12 +103,35 @@
"\u0441\u0443\u0431",
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u043d",
+ "\u043f",
+ "\u0443",
+ "\u0441",
+ "\u0447",
+ "\u043f",
+ "\u0441",
+ }
+ },
{ "Eras",
new String[] {
"\u043f. \u043d. \u0435.",
"\u043d. \u0435",
}
},
+ { "short.Eras",
+ new String[] {
+ "\u043f. \u043d. \u0435.",
+ "\u043d. \u0435.",
+ }
+ },
+ { "narrow.Eras",
+ new String[] {
+ "\u043f.\u043d.\u0435.",
+ "\u043d.\u0435.",
+ }
+ },
{ "NumberPatterns",
new String[] {
"#,##0.###",
--- a/jdk/src/share/classes/sun/text/resources/sv/FormatData_sv.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/sv/FormatData_sv.java Mon Dec 17 08:30:06 2012 -0500
@@ -82,6 +82,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "J",
+ "F",
+ "M",
+ "A",
+ "M",
+ "J",
+ "J",
+ "A",
+ "S",
+ "O",
+ "N",
+ "D",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"s\u00f6ndag", // Sunday
@@ -104,12 +121,46 @@
"l\u00f6" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "T",
+ "O",
+ "T",
+ "F",
+ "L",
+ }
+ },
+ { "standalone.DayNarrows",
+ new String[] {
+ "S",
+ "M",
+ "T",
+ "O",
+ "T",
+ "F",
+ "L",
+ }
+ },
+ { "narrow.Eras",
+ new String[] {
+ "f.Kr.",
+ "e.Kr.",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"fm", // am marker
"em" // pm marker
}
},
+ { "narrow.AmPmMarkers",
+ new String[] {
+ "f",
+ "e",
+ }
+ },
{ "NumberElements",
new String[] {
",", // decimal separator
--- a/jdk/src/share/classes/sun/text/resources/th/FormatData_th.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/th/FormatData_th.java Mon Dec 17 08:30:06 2012 -0500
@@ -99,6 +99,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "\u0e21.\u0e04.",
+ "\u0e01.\u0e1e.",
+ "\u0e21\u0e35.\u0e04.",
+ "\u0e40\u0e21.\u0e22.",
+ "\u0e1e.\u0e04.",
+ "\u0e21\u0e34.\u0e22.",
+ "\u0e01.\u0e04.",
+ "\u0e2a.\u0e04.",
+ "\u0e01.\u0e22.",
+ "\u0e15.\u0e04.",
+ "\u0e1e.\u0e22.",
+ "\u0e18.\u0e04.",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"\u0e27\u0e31\u0e19\u0e2d\u0e32\u0e17\u0e34\u0e15\u0e22\u0e4c", // Sunday
@@ -121,6 +138,17 @@
"\u0e2a." // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u0e2d",
+ "\u0e08",
+ "\u0e2d",
+ "\u0e1e",
+ "\u0e1e",
+ "\u0e28",
+ "\u0e2a",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\u0e01\u0e48\u0e2d\u0e19\u0e40\u0e17\u0e35\u0e48\u0e22\u0e07", // am marker
@@ -145,6 +173,12 @@
"\u0e04.\u0e28."
}
},
+ { "narrow.Eras",
+ new String[] {
+ "\u0e01\u0e48\u0e2d\u0e19 \u0e04.\u0e28.",
+ "\u0e04.\u0e28.",
+ }
+ },
{ "buddhist.TimePatterns",
timePatterns
},
--- a/jdk/src/share/classes/sun/text/resources/tr/FormatData_tr.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/tr/FormatData_tr.java Mon Dec 17 08:30:06 2012 -0500
@@ -82,6 +82,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "O",
+ "\u015e",
+ "M",
+ "N",
+ "M",
+ "H",
+ "T",
+ "A",
+ "E",
+ "E",
+ "K",
+ "A",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"Pazar", // Sunday
@@ -104,6 +121,17 @@
"Cmt" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "P",
+ "P",
+ "S",
+ "\u00c7",
+ "P",
+ "C",
+ "C",
+ }
+ },
{ "NumberPatterns",
new String[] {
"#,##0.###;-#,##0.###", // decimal pattern
--- a/jdk/src/share/classes/sun/text/resources/uk/FormatData_uk.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/uk/FormatData_uk.java Mon Dec 17 08:30:06 2012 -0500
@@ -138,6 +138,17 @@
"\u0441\u0431" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u041d",
+ "\u041f",
+ "\u0412",
+ "\u0421",
+ "\u0427",
+ "\u041f",
+ "\u0421",
+ }
+ },
{ "Eras",
new String[] { // era strings
"\u0434\u043e \u043d.\u0435.",
--- a/jdk/src/share/classes/sun/text/resources/vi/FormatData_vi.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/vi/FormatData_vi.java Mon Dec 17 08:30:06 2012 -0500
@@ -106,6 +106,17 @@
"Th 7" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "CN",
+ "T2",
+ "T3",
+ "T4",
+ "T5",
+ "T6",
+ "T7",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"SA", // am marker
--- a/jdk/src/share/classes/sun/text/resources/zh/FormatData_zh.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/text/resources/zh/FormatData_zh.java Mon Dec 17 08:30:06 2012 -0500
@@ -82,6 +82,23 @@
"" // abb month 13 if applicable
}
},
+ { "standalone.MonthNarrows",
+ new String[] {
+ "1\u6708",
+ "2\u6708",
+ "3\u6708",
+ "4\u6708",
+ "5\u6708",
+ "6\u6708",
+ "7\u6708",
+ "8\u6708",
+ "9\u6708",
+ "10\u6708",
+ "11\u6708",
+ "12\u6708",
+ "",
+ }
+ },
{ "DayNames",
new String[] {
"\u661f\u671f\u65e5", // Sunday
@@ -104,6 +121,17 @@
"\u661f\u671f\u516d" // abb Saturday
}
},
+ { "DayNarrows",
+ new String[] {
+ "\u65e5",
+ "\u4e00",
+ "\u4e8c",
+ "\u4e09",
+ "\u56db",
+ "\u4e94",
+ "\u516d",
+ }
+ },
{ "AmPmMarkers",
new String[] {
"\u4e0a\u5348", // am marker
--- a/jdk/src/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java Mon Dec 17 08:30:06 2012 -0500
@@ -89,11 +89,6 @@
}
@Override
- public TimeZoneNameProvider getTimeZoneNameProvider() {
- return null;
- }
-
- @Override
public Locale[] getAvailableLocales() {
Set<String> all = createLanguageTagSet("All");
Locale[] locs = new Locale[all.size()];
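
The DayNarrows, standalone.MonthNarrows and narrow era/AM-PM resources added in the FormatData files above surface through the public java.util.Calendar API via the Java 8 NARROW_FORMAT and NARROW_STANDALONE styles. A minimal sketch, not part of the changeset, that exercises the Swedish data added above (output assumes a build containing these resources):

    import java.util.Calendar;
    import java.util.Locale;

    public class NarrowNamesDemo {
        public static void main(String[] args) {
            Locale sv = new Locale("sv");
            Calendar cal = Calendar.getInstance(sv);
            cal.set(2012, Calendar.JANUARY, 1);          // a Sunday
            // Narrow day name from the new Swedish DayNarrows data: "S"
            System.out.println(cal.getDisplayName(Calendar.DAY_OF_WEEK,
                                                  Calendar.NARROW_FORMAT, sv));
            // Narrow stand-alone month name from standalone.MonthNarrows: "J"
            System.out.println(cal.getDisplayName(Calendar.MONTH,
                                                  Calendar.NARROW_STANDALONE, sv));
        }
    }
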
--- a/jdk/src/share/classes/sun/util/locale/provider/CalendarDataUtility.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/locale/provider/CalendarDataUtility.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,6 @@
package sun.util.locale.provider;
-import java.util.Calendar;
import static java.util.Calendar.*;
import java.util.Locale;
import java.util.Map;
--- a/jdk/src/share/classes/sun/util/locale/provider/CalendarNameProviderImpl.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/locale/provider/CalendarNameProviderImpl.java Mon Dec 17 08:30:06 2012 -0500
@@ -52,7 +52,7 @@
@Override
public String getDisplayName(String calendarType, int field, int value, int style, Locale locale) {
String name = null;
- String key = getKey(calendarType, field, style);
+ String key = getResourceKey(calendarType, field, style);
if (key != null) {
ResourceBundle rb = LocaleProviderAdapter.forType(type).getLocaleData().getDateFormatData(locale);
if (rb.containsKey(key)) {
@@ -64,9 +64,10 @@
name = strings[value];
// If name is empty in standalone, try its `format' style.
if (name.length() == 0
- && (style == SHORT_STANDALONE || style == LONG_STANDALONE)) {
+ && (style == SHORT_STANDALONE || style == LONG_STANDALONE
+ || style == NARROW_STANDALONE)) {
name = getDisplayName(calendarType, field, value,
- style == SHORT_STANDALONE ? SHORT_FORMAT : LONG_FORMAT,
+ getBaseStyle(style),
locale);
}
}
@@ -75,15 +76,17 @@
return name;
}
+ private static int[] REST_OF_STYLES = {
+ SHORT_STANDALONE, LONG_FORMAT, LONG_STANDALONE,
+ NARROW_FORMAT, NARROW_STANDALONE
+ };
@Override
public Map<String, Integer> getDisplayNames(String calendarType, int field, int style, Locale locale) {
Map<String, Integer> names;
if (style == ALL_STYLES) {
names = getDisplayNamesImpl(calendarType, field, SHORT_FORMAT, locale);
- if (field != AM_PM) {
- for (int st : new int[] { SHORT_STANDALONE, LONG_FORMAT, LONG_STANDALONE }) {
- names.putAll(getDisplayNamesImpl(calendarType, field, st, locale));
- }
+ for (int st : REST_OF_STYLES) {
+ names.putAll(getDisplayNamesImpl(calendarType, field, st, locale));
}
} else {
// specific style
@@ -94,26 +97,28 @@
private Map<String, Integer> getDisplayNamesImpl(String calendarType, int field,
int style, Locale locale) {
- String key = getKey(calendarType, field, style);
+ String key = getResourceKey(calendarType, field, style);
Map<String, Integer> map = new TreeMap<>(LengthBasedComparator.INSTANCE);
if (key != null) {
ResourceBundle rb = LocaleProviderAdapter.forType(type).getLocaleData().getDateFormatData(locale);
if (rb.containsKey(key)) {
String[] strings = rb.getStringArray(key);
- if (field == YEAR) {
- if (strings.length > 0) {
- map.put(strings[0], 1);
- }
- } else {
- int base = (field == DAY_OF_WEEK) ? 1 : 0;
- for (int i = 0; i < strings.length; i++) {
- String name = strings[i];
- // Ignore any empty string (some standalone month names
- // are not defined)
- if (name.length() == 0) {
- continue;
+ if (!hasDuplicates(strings)) {
+ if (field == YEAR) {
+ if (strings.length > 0) {
+ map.put(strings[0], 1);
}
- map.put(name, base + i);
+ } else {
+ int base = (field == DAY_OF_WEEK) ? 1 : 0;
+ for (int i = 0; i < strings.length; i++) {
+ String name = strings[i];
+ // Ignore any empty string (some standalone month names
+ // are not defined)
+ if (name.length() == 0) {
+ continue;
+ }
+ map.put(name, base + i);
+ }
}
}
}
@@ -121,6 +126,10 @@
return map;
}
+ private int getBaseStyle(int style) {
+ return style & ~(SHORT_STANDALONE - SHORT_FORMAT);
+ }
+
/**
* Comparator implementation for TreeMap which iterates keys from longest
* to shortest.
@@ -180,55 +189,92 @@
return langtags;
}
- private int getIntData(String key, Locale locale) {
- ResourceBundle rb = LocaleProviderAdapter.forType(type).getLocaleData().getCalendarData(locale);
- if (rb.containsKey(key)) {
- String firstday = rb.getString(key);
- return Integer.parseInt(firstday);
+ private boolean hasDuplicates(String[] strings) {
+ int len = strings.length;
+ for (int i = 0; i < len - 1; i++) {
+ String a = strings[i];
+ if (a != null) {
+ for (int j = i + 1; j < len; j++) {
+ if (a.equals(strings[j])) {
+ return true;
+ }
+ }
+ }
}
- // Note that the base bundle of CLDR doesn't have the Calendar week parameters.
- return 0;
+ return false;
}
- private String getKey(String type, int field, int style) {
- boolean standalone = (style & 0x8000) != 0;
- style &= ~0x8000;
+ private String getResourceKey(String type, int field, int style) {
+ int baseStyle = getBaseStyle(style);
+ boolean isStandalone = (style != baseStyle);
if ("gregory".equals(type)) {
type = null;
}
-
+ boolean isNarrow = (baseStyle == NARROW_FORMAT);
StringBuilder key = new StringBuilder();
switch (field) {
case ERA:
if (type != null) {
key.append(type).append('.');
}
- if (style == SHORT) {
- key.append("short.");
+ if (isNarrow) {
+ key.append("narrow.");
+ } else {
+ // JRE and CLDR use different resource key conventions for historical
+ // reasons: JRE bundles keep the default era names under "Eras" and use a
+ // "short." prefix for abbreviations, while CLDR bundles keep abbreviations
+ // under "Eras" and use a "long." prefix for the full names.
+ if (this.type == LocaleProviderAdapter.Type.JRE) {
+ if (baseStyle == SHORT) {
+ key.append("short.");
+ }
+ } else { // CLDR
+ if (baseStyle == LONG) {
+ key.append("long.");
+ }
+ }
}
key.append("Eras");
break;
case YEAR:
- key.append(type).append(".FirstYear");
+ if (!isNarrow) {
+ key.append(type).append(".FirstYear");
+ }
break;
case MONTH:
- if (standalone) {
+ if (isStandalone) {
key.append("standalone.");
}
- key.append(style == SHORT ? "MonthAbbreviations" : "MonthNames");
+ key.append("Month").append(toStyleName(baseStyle));
break;
case DAY_OF_WEEK:
- key.append(style == SHORT ? "DayAbbreviations" : "DayNames");
+ // support standalone narrow day names
+ if (isStandalone && isNarrow) {
+ key.append("standalone.");
+ }
+ key.append("Day").append(toStyleName(baseStyle));
break;
case AM_PM:
+ if (isNarrow) {
+ key.append("narrow.");
+ }
key.append("AmPmMarkers");
break;
}
return key.length() > 0 ? key.toString() : null;
}
+
+ private String toStyleName(int baseStyle) {
+ switch (baseStyle) {
+ case SHORT:
+ return "Abbreviations";
+ case NARROW_FORMAT:
+ return "Narrows";
+ }
+ return "Names";
+ }
}
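
For reference, the key construction in getResourceKey above ties the calendar field and style to the resource names added in the FormatData files. A standalone sketch of the day-of-week case, with the JDK 8 java.util.Calendar constant values written out (illustrative only, not the patch code itself):

    // SHORT_FORMAT = 1, LONG_FORMAT = 2, NARROW_FORMAT = 4; each *_STANDALONE
    // style is the matching *_FORMAT value with the 0x8000 bit set on top.
    static int baseStyle(int style) {
        return style & ~0x8000;                  // strip the stand-alone bit
    }

    static String dayOfWeekKey(int style) {
        int base = baseStyle(style);
        boolean standalone = (style != base);    // the 0x8000 bit was set
        String suffix = (base == 1) ? "Abbreviations"
                      : (base == 4) ? "Narrows"
                      : "Names";
        // Only the narrow day names get a separate stand-alone resource.
        return ((standalone && base == 4) ? "standalone." : "") + "Day" + suffix;
    }
    // dayOfWeekKey(0x8004 /* NARROW_STANDALONE */) -> "standalone.DayNarrows"
    // dayOfWeekKey(4      /* NARROW_FORMAT     */) -> "DayNarrows"
    // dayOfWeekKey(1      /* SHORT_FORMAT      */) -> "DayAbbreviations"
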
--- a/jdk/src/share/classes/sun/util/locale/provider/LocaleResources.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/locale/provider/LocaleResources.java Mon Dec 17 08:30:06 2012 -0500
@@ -46,7 +46,7 @@
import java.util.ResourceBundle;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import sun.util.resources.OpenListResourceBundle;
+import sun.util.resources.TimeZoneNamesBundle;
/**
* Central accessor to locale-dependent resources.
@@ -67,13 +67,13 @@
this.locale = locale;
}
- public OpenListResourceBundle getTimeZoneNames() {
- OpenListResourceBundle tznames = (OpenListResourceBundle) cache.get("TimeZoneNames");
+ public TimeZoneNamesBundle getTimeZoneNames() {
+ TimeZoneNamesBundle tznames = (TimeZoneNamesBundle) cache.get("TimeZoneNames");
if (tznames == null) {
tznames = adapter.getLocaleData().getTimeZoneNames(locale);
- OpenListResourceBundle olrb = (OpenListResourceBundle) cache.putIfAbsent("TimeZoneNames", tznames);
- if (olrb != null) {
- tznames = olrb;
+ TimeZoneNamesBundle tznb = (TimeZoneNamesBundle) cache.putIfAbsent("TimeZoneNames", tznames);
+ if (tznb != null) {
+ tznames = tznb;
}
}
return tznames;
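
The cache handling above is the usual ConcurrentMap putIfAbsent pattern: the thread that loses the race adopts the bundle that was cached first. A generic sketch of the same idiom (names are illustrative, not from the patch):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Supplier;

    class BundleCache<V> {
        private final ConcurrentMap<String, V> cache = new ConcurrentHashMap<>();

        V getOrLoad(String key, Supplier<V> loader) {
            V value = cache.get(key);
            if (value == null) {
                value = loader.get();
                V raced = cache.putIfAbsent(key, value);
                if (raced != null) {
                    value = raced;   // another thread cached first; use its instance
                }
            }
            return value;
        }
    }
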
--- a/jdk/src/share/classes/sun/util/locale/provider/SPILocaleProviderAdapter.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/locale/provider/SPILocaleProviderAdapter.java Mon Dec 17 08:30:06 2012 -0500
@@ -604,5 +604,12 @@
assert tznp != null;
return tznp.getDisplayName(ID, daylight, style, locale);
}
+
+ @Override
+ public String getGenericDisplayName(String ID, int style, Locale locale) {
+ TimeZoneNameProvider tznp = getImpl(locale);
+ assert tznp != null;
+ return tznp.getGenericDisplayName(ID, style, locale);
+ }
}
}
--- a/jdk/src/share/classes/sun/util/locale/provider/TimeZoneNameProviderImpl.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/locale/provider/TimeZoneNameProviderImpl.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,11 +25,14 @@
package sun.util.locale.provider;
+import java.util.LinkedHashSet;
import java.util.Locale;
-import java.util.ResourceBundle;
+import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.spi.TimeZoneNameProvider;
+import sun.util.calendar.ZoneInfo;
+import sun.util.resources.TimeZoneNamesBundle;
/**
* Concrete implementation of the
@@ -96,21 +99,67 @@
*/
@Override
public String getDisplayName(String id, boolean daylight, int style, Locale locale) {
+ String[] names = getDisplayNameArray(id, 5, locale);
+ if (names != null) {
+ int index = daylight ? 3 : 1;
+ if (style == TimeZone.SHORT) {
+ index++;
+ }
+ return names[index];
+ }
+ return null;
+ }
+
+ @Override
+ public String getGenericDisplayName(String id, int style, Locale locale) {
+ String[] names = getDisplayNameArray(id, 7, locale);
+ if (names != null && names.length >= 7) {
+ return names[(style == TimeZone.LONG) ? 5 : 6];
+ }
+ return null;
+ }
+
+ private String[] getDisplayNameArray(String id, int n, Locale locale) {
if (id == null || locale == null) {
throw new NullPointerException();
}
+ LocaleProviderAdapter adapter = LocaleProviderAdapter.forType(type);
+ TimeZoneNamesBundle rb = adapter.getLocaleResources(locale).getTimeZoneNames();
+ return rb.containsKey(id) ? rb.getStringArray(id, n) : null;
+ }
+ /**
+ * Returns a String[][] as the DateFormatSymbols.getZoneStrings() value for
+ * the given locale. This method is package private.
+ *
+ * @param locale a Locale for time zone names
+ * @return an array of time zone names arrays
+ */
+ String[][] getZoneStrings(Locale locale) {
LocaleProviderAdapter adapter = LocaleProviderAdapter.forType(type);
- ResourceBundle rb = adapter.getLocaleResources(locale).getTimeZoneNames();
- if (rb.containsKey(id)) {
- String[] names = rb.getStringArray(id);
- int index = daylight ? 3 : 1;
- if (style == TimeZone.SHORT) {
- index++;
+ TimeZoneNamesBundle rb = adapter.getLocaleResources(locale).getTimeZoneNames();
+ Set<String> keyset = rb.keySet();
+ // Use a LinkedHashSet to preserve the order
+ Set<String[]> value = new LinkedHashSet<>();
+ for (String key : keyset) {
+ value.add(rb.getStringArray(key));
+ }
+
+ // Add aliases data for CLDR
+ if (type == LocaleProviderAdapter.Type.CLDR) {
+ // Note: TimeZoneNamesBundle creates a String[] on each getStringArray call.
+ Map<String, String> aliases = ZoneInfo.getAliasTable();
+ for (String alias : aliases.keySet()) {
+ if (!keyset.contains(alias)) {
+ String tzid = aliases.get(alias);
+ if (keyset.contains(tzid)) {
+ String[] val = rb.getStringArray(tzid);
+ val[0] = alias;
+ value.add(val);
+ }
}
- return names[index];
}
-
- return null;
+ }
+ return value.toArray(new String[0][]);
}
}
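
The arrays consumed here follow the TimeZoneNames layout extended later in this changeset: index 0 holds the zone ID, 1/2 the standard long/short names, 3/4 the daylight long/short names, and 5/6 the new generic long/short names. A small sketch of the same index arithmetic used by getDisplayName and getGenericDisplayName (illustrative, not the patch code):

    // names = { id, stdLong, stdShort, dstLong, dstShort, genericLong, genericShort }
    static String pickName(String[] names, boolean daylight, boolean generic,
                           boolean shortStyle) {
        if (generic) {
            return shortStyle ? names[6] : names[5];
        }
        int index = daylight ? 3 : 1;    // long name slot
        if (shortStyle) {
            index++;                     // the short name follows the long one
        }
        return names[index];
    }
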
--- a/jdk/src/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,28 +26,28 @@
package sun.util.locale.provider;
import java.lang.ref.SoftReference;
-import java.util.Enumeration;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.spi.TimeZoneNameProvider;
import sun.util.calendar.ZoneInfo;
import sun.util.resources.OpenListResourceBundle;
+import sun.util.resources.TimeZoneNamesBundle;
/**
* Utility class that deals with the localized time zone names
*
* @author Naoto Sato
+ * @author Masayoshi Okutsu
*/
public final class TimeZoneNameUtility {
/**
* cache to hold time zone resource bundles. Keyed by Locale
*/
- private static ConcurrentHashMap<Locale, SoftReference<OpenListResourceBundle>> cachedBundles =
+ private static ConcurrentHashMap<Locale, SoftReference<TimeZoneNamesBundle>> cachedBundles =
new ConcurrentHashMap<>();
/**
@@ -73,15 +73,19 @@
}
private static String[][] loadZoneStrings(Locale locale) {
+ // If the provider is a TimeZoneNameProviderImpl, call its getZoneStrings
+ // in order to avoid per-ID retrieval.
+ LocaleProviderAdapter adapter = LocaleProviderAdapter.getAdapter(TimeZoneNameProvider.class, locale);
+ TimeZoneNameProvider provider = adapter.getTimeZoneNameProvider();
+ if (provider instanceof TimeZoneNameProviderImpl) {
+ return ((TimeZoneNameProviderImpl)provider).getZoneStrings(locale);
+ }
+
+ // Performs per-ID retrieval.
List<String[]> zones = new LinkedList<>();
OpenListResourceBundle rb = getBundle(locale);
- Enumeration<String> keys = rb.getKeys();
- String[] names;
-
- while(keys.hasMoreElements()) {
- String key = keys.nextElement();
-
- names = retrieveDisplayNames(rb, key, locale);
+ for (String key : rb.keySet()) {
+ String[] names = retrieveDisplayNamesImpl(key, locale);
if (names != null) {
zones.add(names);
}
@@ -95,24 +99,50 @@
* Retrieve display names for a time zone ID.
*/
public static String[] retrieveDisplayNames(String id, Locale locale) {
- OpenListResourceBundle rb = getBundle(locale);
- return retrieveDisplayNames(rb, id, locale);
- }
-
- private static String[] retrieveDisplayNames(OpenListResourceBundle rb,
- String id, Locale locale) {
if (id == null || locale == null) {
throw new NullPointerException();
}
+ return retrieveDisplayNamesImpl(id, locale);
+ }
+ /**
+ * Retrieves a generic time zone display name for a time zone ID.
+ *
+ * @param id time zone ID
+ * @param style TimeZone.LONG or TimeZone.SHORT
+ * @param locale desired Locale
+ * @return the requested generic time zone display name, or null if not found.
+ */
+ public static String retrieveGenericDisplayName(String id, int style, Locale locale) {
LocaleServiceProviderPool pool =
LocaleServiceProviderPool.getPool(TimeZoneNameProvider.class);
- return pool.getLocalizedObject(TimeZoneNameGetter.INSTANCE, locale, id);
+ return pool.getLocalizedObject(TimeZoneNameGetter.INSTANCE, locale, "generic", style, id);
}
- private static OpenListResourceBundle getBundle(Locale locale) {
- OpenListResourceBundle rb;
- SoftReference<OpenListResourceBundle> data = cachedBundles.get(locale);
+ /**
+ * Retrieves a standard or daylight-saving time name for the given time zone ID.
+ *
+ * @param id time zone ID
+ * @param daylight true for a daylight saving time name, or false for a standard time name
+ * @param style TimeZone.LONG or TimeZone.SHORT
+ * @param locale desired Locale
+ * @return the requested time zone name, or null if not found.
+ */
+ public static String retrieveDisplayName(String id, boolean daylight, int style, Locale locale) {
+ LocaleServiceProviderPool pool =
+ LocaleServiceProviderPool.getPool(TimeZoneNameProvider.class);
+ return pool.getLocalizedObject(TimeZoneNameGetter.INSTANCE, locale, daylight ? "dst" : "std", style, id);
+ }
+
+ private static String[] retrieveDisplayNamesImpl(String id, Locale locale) {
+ LocaleServiceProviderPool pool =
+ LocaleServiceProviderPool.getPool(TimeZoneNameProvider.class);
+ return pool.getLocalizedObject(TimeZoneNameArrayGetter.INSTANCE, locale, id);
+ }
+
+ private static TimeZoneNamesBundle getBundle(Locale locale) {
+ TimeZoneNamesBundle rb;
+ SoftReference<TimeZoneNamesBundle> data = cachedBundles.get(locale);
if (data == null || ((rb = data.get()) == null)) {
rb = LocaleProviderAdapter.forJRE().getLocaleData().getTimeZoneNames(locale);
@@ -127,19 +157,18 @@
* Obtains localized time zone strings from a TimeZoneNameProvider
* implementation.
*/
- private static class TimeZoneNameGetter
+ private static class TimeZoneNameArrayGetter
implements LocaleServiceProviderPool.LocalizedObjectGetter<TimeZoneNameProvider,
String[]>{
- private static final TimeZoneNameGetter INSTANCE =
- new TimeZoneNameGetter();
+ private static final TimeZoneNameArrayGetter INSTANCE =
+ new TimeZoneNameArrayGetter();
@Override
public String[] getObject(TimeZoneNameProvider timeZoneNameProvider,
- Locale locale,
- String requestID,
- Object... params) {
+ Locale locale,
+ String requestID,
+ Object... params) {
assert params.length == 0;
- String queryID = requestID;
// First, try to get names with the request ID
String[] names = buildZoneStrings(timeZoneNameProvider, locale, requestID);
@@ -150,21 +179,15 @@
if (aliases != null) {
// Check whether this id is an alias, if so,
// look for the standard id.
- if (aliases.containsKey(queryID)) {
- String prevID = queryID;
- while ((queryID = aliases.get(queryID)) != null) {
- prevID = queryID;
- }
- queryID = prevID;
+ String canonicalID = aliases.get(requestID);
+ if (canonicalID != null) {
+ names = buildZoneStrings(timeZoneNameProvider, locale, canonicalID);
}
-
- names = buildZoneStrings(timeZoneNameProvider, locale, queryID);
-
if (names == null) {
// There may be a case that a standard id has become an
// alias. so, check the aliases backward.
names = examineAliases(timeZoneNameProvider, locale,
- queryID, aliases, aliases.entrySet());
+ canonicalID == null ? requestID : canonicalID, aliases);
}
}
}
@@ -178,20 +201,18 @@
private static String[] examineAliases(TimeZoneNameProvider tznp, Locale locale,
String id,
- Map<String, String> aliases,
- Set<Map.Entry<String, String>> aliasesSet) {
+ Map<String, String> aliases) {
if (aliases.containsValue(id)) {
- for (Map.Entry<String, String> entry : aliasesSet) {
+ for (Map.Entry<String, String> entry : aliases.entrySet()) {
if (entry.getValue().equals(id)) {
String alias = entry.getKey();
String[] names = buildZoneStrings(tznp, locale, alias);
if (names != null) {
return names;
- } else {
- names = examineAliases(tznp, locale, alias, aliases, aliasesSet);
- if (names != null) {
- return names;
- }
+ }
+ names = examineAliases(tznp, locale, alias, aliases);
+ if (names != null) {
+ return names;
}
}
}
@@ -201,7 +222,7 @@
}
private static String[] buildZoneStrings(TimeZoneNameProvider tznp,
- Locale locale, String id) {
+ Locale locale, String id) {
String[] names = new String[5];
for (int i = 1; i <= 4; i ++) {
@@ -220,6 +241,77 @@
}
}
+ private static class TimeZoneNameGetter
+ implements LocaleServiceProviderPool.LocalizedObjectGetter<TimeZoneNameProvider,
+ String> {
+ private static final TimeZoneNameGetter INSTANCE =
+ new TimeZoneNameGetter();
+
+ @Override
+ public String getObject(TimeZoneNameProvider timeZoneNameProvider,
+ Locale locale,
+ String requestID,
+ Object... params) {
+ assert params.length == 2;
+ int style = (int) params[0];
+ String tzid = (String) params[1];
+ String value = getName(timeZoneNameProvider, locale, requestID, style, tzid);
+ if (value == null) {
+ Map<String, String> aliases = ZoneInfo.getAliasTable();
+ if (aliases != null) {
+ String canonicalID = aliases.get(tzid);
+ if (canonicalID != null) {
+ value = getName(timeZoneNameProvider, locale, requestID, style, canonicalID);
+ }
+ if (value == null) {
+ value = examineAliases(timeZoneNameProvider, locale, requestID,
+ canonicalID != null ? canonicalID : tzid, style, aliases);
+ }
+ }
+ }
+
+ return value;
+ }
+
+ private static String examineAliases(TimeZoneNameProvider tznp, Locale locale,
+ String requestID, String tzid, int style,
+ Map<String, String> aliases) {
+ if (aliases.containsValue(tzid)) {
+ for (Map.Entry<String, String> entry : aliases.entrySet()) {
+ if (entry.getValue().equals(tzid)) {
+ String alias = entry.getKey();
+ String name = getName(tznp, locale, requestID, style, alias);
+ if (name != null) {
+ return name;
+ }
+ name = examineAliases(tznp, locale, requestID, alias, style, aliases);
+ if (name != null) {
+ return name;
+ }
+ }
+ }
+ }
+ return null;
+ }
+
+ private static String getName(TimeZoneNameProvider timeZoneNameProvider,
+ Locale locale, String requestID, int style, String tzid) {
+ String value = null;
+ switch (requestID) {
+ case "std":
+ value = timeZoneNameProvider.getDisplayName(tzid, false, style, locale);
+ break;
+ case "dst":
+ value = timeZoneNameProvider.getDisplayName(tzid, true, style, locale);
+ break;
+ case "generic":
+ value = timeZoneNameProvider.getGenericDisplayName(tzid, style, locale);
+ break;
+ }
+ return value;
+ }
+ }
+
// No instantiation
private TimeZoneNameUtility() {
}
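
The new retrieveDisplayName and retrieveGenericDisplayName entry points let callers fetch a single zone name without building the full name array. A hedged usage sketch against the internal sun.util API, assuming the JRE provider and the TimeZoneNames data from this changeset:

    import java.util.Locale;
    import java.util.TimeZone;
    import sun.util.locale.provider.TimeZoneNameUtility;

    public class ZoneNameDemo {
        public static void main(String[] args) {
            // Generic long name, e.g. "Pacific Time" with the data above
            System.out.println(TimeZoneNameUtility.retrieveGenericDisplayName(
                    "America/Los_Angeles", TimeZone.LONG, Locale.US));
            // Daylight long name, e.g. "Pacific Daylight Time"
            System.out.println(TimeZoneNameUtility.retrieveDisplayName(
                    "America/Los_Angeles", true, TimeZone.LONG, Locale.US));
        }
    }
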
--- a/jdk/src/share/classes/sun/util/resources/LocaleData.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/resources/LocaleData.java Mon Dec 17 08:30:06 2012 -0500
@@ -46,9 +46,9 @@
import java.util.List;
import java.util.Locale;
import java.util.ResourceBundle;
+import sun.util.locale.provider.LocaleDataMetaInfo;
import sun.util.locale.provider.LocaleProviderAdapter;
import static sun.util.locale.provider.LocaleProviderAdapter.Type.JRE;
-import sun.util.locale.provider.LocaleDataMetaInfo;
/**
* Provides information about and access to resource bundles in the
@@ -94,8 +94,8 @@
* Gets a time zone names resource bundle, using privileges
* to allow accessing a sun.* package.
*/
- public OpenListResourceBundle getTimeZoneNames(Locale locale) {
- return (OpenListResourceBundle) getBundle(type.getUtilResourcesPackage() + ".TimeZoneNames", locale);
+ public TimeZoneNamesBundle getTimeZoneNames(Locale locale) {
+ return (TimeZoneNamesBundle) getBundle(type.getUtilResourcesPackage() + ".TimeZoneNames", locale);
}
/**
@@ -158,30 +158,33 @@
/* Get the locale string list from LocaleDataMetaInfo class. */
String localeString = LocaleDataMetaInfo.getSupportedLocaleString(baseName);
- if (localeString == null || localeString.length() == 0) {
- return candidates;
- }
-
- for (Iterator<Locale> l = candidates.iterator(); l.hasNext(); ) {
- Locale loc = l.next();
- String lstr;
- if (loc.getScript().length() > 0) {
- lstr = loc.toLanguageTag().replace('-', '_');
- } else {
- lstr = loc.toString();
- int idx = lstr.indexOf("_#");
- if (idx >= 0) {
- lstr = lstr.substring(0, idx);
+ if (localeString != null && localeString.length() != 0) {
+ for (Iterator<Locale> l = candidates.iterator(); l.hasNext();) {
+ Locale loc = l.next();
+ String lstr;
+ if (loc.getScript().length() > 0) {
+ lstr = loc.toLanguageTag().replace('-', '_');
+ } else {
+ lstr = loc.toString();
+ int idx = lstr.indexOf("_#");
+ if (idx >= 0) {
+ lstr = lstr.substring(0, idx);
+ }
+ }
+ /* Every locale string in the locale string list returned from
+ the above getSupportedLocaleString is enclosed
+ within two white spaces so that we could check some locale
+ such as "en".
+ */
+ if (lstr.length() != 0 && localeString.indexOf(" " + lstr + " ") == -1) {
+ l.remove();
}
}
- /* Every locale string in the locale string list returned from
- the above getSupportedLocaleString is enclosed
- within two white spaces so that we could check some locale
- such as "en".
- */
- if (lstr.length() != 0 && localeString.indexOf(" " + lstr + " ") == -1) {
- l.remove();
- }
+ }
+ // Force fallback to Locale.ENGLISH for CLDR time zone names support
+ if (locale.getLanguage() != "en"
+ && baseName.contains(CLDR) && baseName.endsWith("TimeZoneNames")) {
+ candidates.add(candidates.size() - 1, Locale.ENGLISH);
}
return candidates;
}
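
The English fallback above is inserted just before the last candidate, so a CLDR time zone lookup falls back to Locale.ENGLISH before reaching the base bundle. A tiny illustration of that list manipulation with a made-up candidate chain:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Locale;

    public class FallbackDemo {
        public static void main(String[] args) {
            // Hypothetical candidate chain for de_DE: [de_DE, de, root]
            List<Locale> candidates = new ArrayList<>(
                    Arrays.asList(Locale.GERMANY, Locale.GERMAN, Locale.ROOT));
            // Same insertion as the patch: English goes in front of the root locale
            candidates.add(candidates.size() - 1, Locale.ENGLISH);
            System.out.println(candidates);   // [de_DE, de, en, ]
        }
    }
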
--- a/jdk/src/share/classes/sun/util/resources/OpenListResourceBundle.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/resources/OpenListResourceBundle.java Mon Dec 17 08:30:06 2012 -0500
@@ -67,6 +67,7 @@
}
// Implements java.util.ResourceBundle.handleGetObject; inherits javadoc specification.
+ @Override
public Object handleGetObject(String key) {
if (key == null) {
throw new NullPointerException();
@@ -79,6 +80,7 @@
/**
* Implementation of ResourceBundle.getKeys.
*/
+ @Override
public Enumeration<String> getKeys() {
ResourceBundle parent = this.parent;
return new ResourceBundleEnumeration(handleGetKeys(),
@@ -86,7 +88,8 @@
}
/**
- * Returns a set of keys provided in this resource bundle
+ * Returns a set of keys provided in this resource bundle,
+ * not including keys from any parent bundle.
*/
public Set<String> handleGetKeys() {
loadLookupTablesIfNecessary();
@@ -99,7 +102,7 @@
if (keyset != null) {
return keyset;
}
- Set<String> ks = new HashSet<>();
+ Set<String> ks = createSet();
ks.addAll(handleGetKeys());
if (parent != null) {
ks.addAll(parent.keySet());
@@ -113,13 +116,6 @@
}
/**
- * Returns the parent bundle
- */
- public OpenListResourceBundle getParent() {
- return (OpenListResourceBundle)parent;
- }
-
- /**
* See ListResourceBundle class description.
*/
abstract protected Object[][] getContents();
@@ -160,10 +156,14 @@
* Lets subclasses provide specialized Map implementations.
* Default uses HashMap.
*/
- protected Map<String, Object> createMap(int size) {
+ protected <K, V> Map<K, V> createMap(int size) {
return new HashMap<>(size);
}
+ protected <E> Set<E> createSet() {
+ return new HashSet<>();
+ }
+
private volatile Map<String, Object> lookup = null;
private volatile Set<String> keyset;
}
--- a/jdk/src/share/classes/sun/util/resources/TimeZoneNames.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/resources/TimeZoneNames.java Mon Dec 17 08:30:06 2012 -0500
@@ -43,160 +43,238 @@
public final class TimeZoneNames extends TimeZoneNamesBundle {
protected final Object[][] getContents() {
+ // Note: generic names came from CLDR with some adjustments.
String ACT[] = new String[] {"Acre Time", "ACT",
- "Acre Summer Time", "ACST"};
+ "Acre Summer Time", "ACST",
+ "Acre Time", "ACT"};
String ADELAIDE[] = new String[] {"Central Standard Time (South Australia)", "CST",
- "Central Summer Time (South Australia)", "CST"};
+ "Central Summer Time (South Australia)", "CST",
+ "Central Time (South Australia)", "CT"};
String AGT[] = new String[] {"Argentine Time", "ART",
- "Argentine Summer Time", "ARST"};
+ "Argentine Summer Time", "ARST",
+ "Argentine Time", "ART"};
String AKST[] = new String[] {"Alaska Standard Time", "AKST",
- "Alaska Daylight Time", "AKDT"};
+ "Alaska Daylight Time", "AKDT",
+ "Alaska Time", "AKT"};
String AMT[] = new String[] {"Amazon Time", "AMT",
- "Amazon Summer Time", "AMST"};
+ "Amazon Summer Time", "AMST",
+ "Amazon Time", "AMT"};
String ARAST[] = new String[] {"Arabia Standard Time", "AST",
- "Arabia Daylight Time", "ADT"};
+ "Arabia Daylight Time", "ADT",
+ "Arabia Time", "AT"};
String ARMT[] = new String[] {"Armenia Time", "AMT",
- "Armenia Summer Time", "AMST"};
+ "Armenia Summer Time", "AMST",
+ "Armenia Time", "AMT"};
String AST[] = new String[] {"Atlantic Standard Time", "AST",
- "Atlantic Daylight Time", "ADT"};
+ "Atlantic Daylight Time", "ADT",
+ "Atlantic Time", "AT"};
String BDT[] = new String[] {"Bangladesh Time", "BDT",
- "Bangladesh Summer Time", "BDST"};
+ "Bangladesh Summer Time", "BDST",
+ "Bangladesh Time", "BDT"};
String BRISBANE[] = new String[] {"Eastern Standard Time (Queensland)", "EST",
- "Eastern Summer Time (Queensland)", "EST"};
+ "Eastern Summer Time (Queensland)", "EST",
+ "Eastern Time (Queensland)", "ET"};
String BROKEN_HILL[] = new String[] {"Central Standard Time (South Australia/New South Wales)", "CST",
- "Central Summer Time (South Australia/New South Wales)", "CST"};
+ "Central Summer Time (South Australia/New South Wales)", "CST",
+ "Central Time (South Australia/New South Wales)", "CT"};
String BRT[] = new String[] {"Brasilia Time", "BRT",
- "Brasilia Summer Time", "BRST"};
+ "Brasilia Summer Time", "BRST",
+ "Brasilia Time", "BRT"};
String BTT[] = new String[] {"Bhutan Time", "BTT",
- "Bhutan Summer Time", "BTST"};
+ "Bhutan Summer Time", "BTST",
+ "Bhutan Time", "BTT"};
String CAT[] = new String[] {"Central African Time", "CAT",
- "Central African Summer Time", "CAST"};
+ "Central African Summer Time", "CAST",
+ "Central Africa Time", "CAT"};
String CET[] = new String[] {"Central European Time", "CET",
- "Central European Summer Time", "CEST"};
+ "Central European Summer Time", "CEST",
+ "Central European Time", "CET"};
String CHAST[] = new String[] {"Chatham Standard Time", "CHAST",
- "Chatham Daylight Time", "CHADT"};
+ "Chatham Daylight Time", "CHADT",
+ "Chatham Time", "CHAT"};
String CHUT[] = new String[] {"Chuuk Time", "CHUT",
- "Chuuk Summer Time", "CHUST"};
+ "Chuuk Summer Time", "CHUST",
+ "Chuuk Time", "CHUT"};
String CIT[] = new String[] {"Central Indonesia Time", "CIT",
- "Central Indonesia Summer Time", "CIST"};
+ "Central Indonesia Summer Time", "CIST",
+ "Central Indonesia Time", "CIT"};
String CLT[] = new String[] {"Chile Time", "CLT",
- "Chile Summer Time", "CLST"};
+ "Chile Summer Time", "CLST",
+ "Chile Time", "CLT"};
String CST[] = new String[] {"Central Standard Time", "CST",
- "Central Daylight Time", "CDT"};
+ "Central Daylight Time", "CDT",
+ "Central Time", "CT"};
String CTT[] = new String[] {"China Standard Time", "CST",
- "China Daylight Time", "CDT"};
+ "China Daylight Time", "CDT",
+ "China Time", "CT"};
String CUBA[] = new String[] {"Cuba Standard Time", "CST",
- "Cuba Daylight Time", "CDT"};
+ "Cuba Daylight Time", "CDT",
+ "Cuba Time", "CT"};
String DARWIN[] = new String[] {"Central Standard Time (Northern Territory)", "CST",
- "Central Summer Time (Northern Territory)", "CST"};
+ "Central Summer Time (Northern Territory)", "CST",
+ "Central Time (Northern Territory)", "CT"};
String DUBLIN[] = new String[] {"Greenwich Mean Time", "GMT",
- "Irish Summer Time", "IST"};
+ "Irish Summer Time", "IST",
+ "Irish Time", "IT"};
String EAT[] = new String[] {"Eastern African Time", "EAT",
- "Eastern African Summer Time", "EAST"};
+ "Eastern African Summer Time", "EAST",
+ "Eastern Africa Time", "EAT"};
String EASTER[] = new String[] {"Easter Is. Time", "EAST",
- "Easter Is. Summer Time", "EASST"};
+ "Easter Is. Summer Time", "EASST",
+ "Easter Is. Time", "EAST"};
String EET[] = new String[] {"Eastern European Time", "EET",
- "Eastern European Summer Time", "EEST"};
+ "Eastern European Summer Time", "EEST",
+ "Eastern European Time", "EET"};
String EGT[] = new String[] {"Eastern Greenland Time", "EGT",
- "Eastern Greenland Summer Time", "EGST"};
+ "Eastern Greenland Summer Time", "EGST",
+ "Eastern Greenland Time", "EGT"};
String EST[] = new String[] {"Eastern Standard Time", "EST",
- "Eastern Daylight Time", "EDT"};
+ "Eastern Daylight Time", "EDT",
+ "Eastern Time", "ET"};
String EST_NSW[] = new String[] {"Eastern Standard Time (New South Wales)", "EST",
- "Eastern Summer Time (New South Wales)", "EST"};
+ "Eastern Summer Time (New South Wales)", "EST",
+ "Eastern Time (New South Wales)", "ET"};
String FET[] = new String[] {"Further-eastern European Time", "FET",
- "Further-eastern European Summer Time", "FEST"};
+ "Further-eastern European Summer Time", "FEST",
+ "Further-eastern European Time", "FET"};
String GHMT[] = new String[] {"Ghana Mean Time", "GMT",
- "Ghana Summer Time", "GHST"};
+ "Ghana Summer Time", "GHST",
+ "Ghana Mean Time", "GMT"};
String GAMBIER[] = new String[] {"Gambier Time", "GAMT",
- "Gambier Summer Time", "GAMST"};
+ "Gambier Summer Time", "GAMST",
+ "Gambier Time", "GAMT"};
String GMT[] = new String[] {"Greenwich Mean Time", "GMT",
+ "Greenwich Mean Time", "GMT",
"Greenwich Mean Time", "GMT"};
String GMTBST[] = new String[] {"Greenwich Mean Time", "GMT",
- "British Summer Time", "BST"};
+ "British Summer Time", "BST",
+ "British Time", "BT"};
String GST[] = new String[] {"Gulf Standard Time", "GST",
- "Gulf Daylight Time", "GDT"};
+ "Gulf Daylight Time", "GDT",
+ "Gulf Time", "GT"};
String HAST[] = new String[] {"Hawaii-Aleutian Standard Time", "HAST",
- "Hawaii-Aleutian Daylight Time", "HADT"};
+ "Hawaii-Aleutian Daylight Time", "HADT",
+ "Hawaii-Aleutian Time", "HAT"};
String HKT[] = new String[] {"Hong Kong Time", "HKT",
- "Hong Kong Summer Time", "HKST"};
+ "Hong Kong Summer Time", "HKST",
+ "Hong Kong Time", "HKT"};
String HST[] = new String[] {"Hawaii Standard Time", "HST",
- "Hawaii Daylight Time", "HDT"};
+ "Hawaii Daylight Time", "HDT",
+ "Hawaii Time", "HT"};
String ICT[] = new String[] {"Indochina Time", "ICT",
- "Indochina Summer Time", "ICST"};
+ "Indochina Summer Time", "ICST",
+ "Indochina Time", "ICT"};
String IRT[] = new String[] {"Iran Standard Time", "IRST",
- "Iran Daylight Time", "IRDT"};
+ "Iran Daylight Time", "IRDT",
+ "Iran Time", "IRT"};
String ISRAEL[] = new String[] {"Israel Standard Time", "IST",
- "Israel Daylight Time", "IDT"};
+ "Israel Daylight Time", "IDT",
+ "Israel Time", "IT"};
String IST[] = new String[] {"India Standard Time", "IST",
- "India Daylight Time", "IDT"};
+ "India Daylight Time", "IDT",
+ "India Time", "IT"};
String JST[] = new String[] {"Japan Standard Time", "JST",
- "Japan Daylight Time", "JDT"};
+ "Japan Daylight Time", "JDT",
+ "Japan Time", "JT"};
String KST[] = new String[] {"Korea Standard Time", "KST",
- "Korea Daylight Time", "KDT"};
+ "Korea Daylight Time", "KDT",
+ "Korea Time", "KT"};
String LORD_HOWE[] = new String[] {"Lord Howe Standard Time", "LHST",
- "Lord Howe Summer Time", "LHST"};
+ "Lord Howe Summer Time", "LHST",
+ "Lord Howe Time", "LHT"};
String MHT[] = new String[] {"Marshall Islands Time", "MHT",
- "Marshall Islands Summer Time", "MHST"};
+ "Marshall Islands Summer Time", "MHST",
+ "Marshall Islands Time", "MHT"};
String MSK[] = new String[] {"Moscow Standard Time", "MSK",
- "Moscow Daylight Time", "MSD"};
+ "Moscow Daylight Time", "MSD",
+ "Moscow Time", "MT"};
String MST[] = new String[] {"Mountain Standard Time", "MST",
- "Mountain Daylight Time", "MDT"};
+ "Mountain Daylight Time", "MDT",
+ "Mountain Time", "MT"};
String MYT[] = new String[] {"Malaysia Time", "MYT",
- "Malaysia Summer Time", "MYST"};
+ "Malaysia Summer Time", "MYST",
+ "Malaysia Time", "MYT"};
String NORONHA[] = new String[] {"Fernando de Noronha Time", "FNT",
- "Fernando de Noronha Summer Time", "FNST"};
+ "Fernando de Noronha Summer Time", "FNST",
+ "Fernando de Noronha Time", "FNT"};
String NOVT[] = new String[] {"Novosibirsk Time", "NOVT",
- "Novosibirsk Summer Time", "NOVST"};
+ "Novosibirsk Summer Time", "NOVST",
+ "Novosibirsk Time", "NOVT"};
String NPT[] = new String[] {"Nepal Time", "NPT",
- "Nepal Summer Time", "NPST"};
+ "Nepal Summer Time", "NPST",
+ "Nepal Time", "NPT"};
String NST[] = new String[] {"Newfoundland Standard Time", "NST",
- "Newfoundland Daylight Time", "NDT"};
+ "Newfoundland Daylight Time", "NDT",
+ "Newfoundland Time", "NT"};
String NZST[] = new String[] {"New Zealand Standard Time", "NZST",
- "New Zealand Daylight Time", "NZDT"};
+ "New Zealand Daylight Time", "NZDT",
+ "New Zealand Time", "NZT"};
String PITCAIRN[] = new String[] {"Pitcairn Standard Time", "PST",
- "Pitcairn Daylight Time", "PDT"};
+ "Pitcairn Daylight Time", "PDT",
+ "Pitcairn Time", "PT"};
String PKT[] = new String[] {"Pakistan Time", "PKT",
- "Pakistan Summer Time", "PKST"};
+ "Pakistan Summer Time", "PKST",
+ "Pakistan Time", "PKT"};
String PONT[] = new String[] {"Pohnpei Time", "PONT",
- "Pohnpei Summer Time", "PONST"};
+ "Pohnpei Summer Time", "PONST",
+ "Ponape Time", "PONT"};
String PST[] = new String[] {"Pacific Standard Time", "PST",
- "Pacific Daylight Time", "PDT"};
+ "Pacific Daylight Time", "PDT",
+ "Pacific Time", "PT"};
String SAST[] = new String[] {"South Africa Standard Time", "SAST",
- "South Africa Summer Time", "SAST"};
+ "South Africa Summer Time", "SAST",
+ "South Africa Time", "SAT"};
String SBT[] = new String[] {"Solomon Is. Time", "SBT",
- "Solomon Is. Summer Time", "SBST"};
+ "Solomon Is. Summer Time", "SBST",
+ "Solomon Is. Time", "SBT"};
String SGT[] = new String[] {"Singapore Time", "SGT",
- "Singapore Summer Time", "SGST"};
+ "Singapore Summer Time", "SGST",
+ "Singapore Time", "SGT"};
String SLST[] = new String[] {"Greenwich Mean Time", "GMT",
- "Sierra Leone Summer Time", "SLST"};
+ "Sierra Leone Summer Time", "SLST",
+ "Sierra Leone Time", "SLT"};
String TASMANIA[] = new String[] {"Eastern Standard Time (Tasmania)", "EST",
- "Eastern Summer Time (Tasmania)", "EST"};
+ "Eastern Summer Time (Tasmania)", "EST",
+ "Eastern Time (Tasmania)", "ET"};
String TMT[] = new String[] {"Turkmenistan Time", "TMT",
- "Turkmenistan Summer Time", "TMST"};
+ "Turkmenistan Summer Time", "TMST",
+ "Turkmenistan Time", "TMT"};
String ULAT[]= new String[] {"Ulaanbaatar Time", "ULAT",
- "Ulaanbaatar Summer Time", "ULAST"};
+ "Ulaanbaatar Summer Time", "ULAST",
+ "Ulaanbaatar Time", "ULAT"};
String WART[] = new String[] {"Western Argentine Time", "WART",
- "Western Argentine Summer Time", "WARST"};
+ "Western Argentine Summer Time", "WARST",
+ "Western Argentine Time", "WART"};
String WAT[] = new String[] {"Western African Time", "WAT",
- "Western African Summer Time", "WAST"};
+ "Western African Summer Time", "WAST",
+ "Western African Time", "WAT"};
String WET[] = new String[] {"Western European Time", "WET",
- "Western European Summer Time", "WEST"};
+ "Western European Summer Time", "WEST",
+ "Western European Time", "WET"};
String WIT[] = new String[] {"West Indonesia Time", "WIT",
- "West Indonesia Summer Time", "WIST"};
+ "West Indonesia Summer Time", "WIST",
+ "West Indonesia Time", "WIT"};
String WST_AUS[] = new String[] {"Western Standard Time (Australia)", "WST",
- "Western Summer Time (Australia)", "WST"};
+ "Western Summer Time (Australia)", "WST",
+ "Western Time (Australia)", "WT"};
String SAMOA[] = new String[] {"Samoa Standard Time", "SST",
- "Samoa Daylight Time", "SDT"};
+ "Samoa Daylight Time", "SDT",
+ "Samoa Time", "ST"};
String WST_SAMOA[] = new String[] {"West Samoa Time", "WST",
- "West Samoa Daylight Time", "WSDT"};
+ "West Samoa Daylight Time", "WSDT",
+ "West Samoa Time", "WST"};
String ChST[] = new String[] {"Chamorro Standard Time", "ChST",
- "Chamorro Daylight Time", "ChDT"};
+ "Chamorro Daylight Time", "ChDT",
+ "Chamorro Time", "ChT"};
String VICTORIA[] = new String[] {"Eastern Standard Time (Victoria)", "EST",
- "Eastern Summer Time (Victoria)", "EST"};
+ "Eastern Summer Time (Victoria)", "EST",
+ "Eastern Time (Victoria)", "ET"};
String UTC[] = new String[] {"Coordinated Universal Time", "UTC",
+ "Coordinated Universal Time", "UTC",
"Coordinated Universal Time", "UTC"};
String UZT[] = new String[] {"Uzbekistan Time", "UZT",
- "Uzbekistan Summer Time", "UZST"};
+ "Uzbekistan Summer Time", "UZST",
+ "Uzbekistan Time", "UZT"};
return new Object[][] {
{"America/Los_Angeles", PST},
@@ -309,7 +387,8 @@
{"America/Argentina/Ushuaia", AGT},
{"America/Aruba", AST},
{"America/Asuncion", new String[] {"Paraguay Time", "PYT",
- "Paraguay Summer Time", "PYST"}},
+ "Paraguay Summer Time", "PYST",
+ "Paraguay Time", "PYT"}},
{"America/Atikokan", EST},
{"America/Atka", HAST},
{"America/Bahia", BRT},
@@ -320,17 +399,20 @@
{"America/Blanc-Sablon", AST},
{"America/Boa_Vista", AMT},
{"America/Bogota", new String[] {"Colombia Time", "COT",
- "Colombia Summer Time", "COST"}},
+ "Colombia Summer Time", "COST",
+ "Colombia Time", "COT"}},
{"America/Boise", MST},
{"America/Buenos_Aires", AGT},
{"America/Cambridge_Bay", MST},
{"America/Campo_Grande", AMT},
{"America/Cancun", CST},
{"America/Caracas", new String[] {"Venezuela Time", "VET",
- "Venezuela Summer Time", "VEST"}},
+ "Venezuela Summer Time", "VEST",
+ "Venezuela Time", "VET"}},
{"America/Catamarca", AGT},
{"America/Cayenne", new String[] {"French Guiana Time", "GFT",
- "French Guiana Summer Time", "GFST"}},
+ "French Guiana Summer Time", "GFST",
+ "French Guiana Time", "GFT"}},
{"America/Cayman", EST},
{"America/Chihuahua", MST},
{"America/Creston", MST},
@@ -352,16 +434,19 @@
{"America/Fortaleza", BRT},
{"America/Glace_Bay", AST},
{"America/Godthab", new String[] {"Western Greenland Time", "WGT",
- "Western Greenland Summer Time", "WGST"}},
+ "Western Greenland Summer Time", "WGST",
+ "Western Greenland Time", "WGT"}},
{"America/Goose_Bay", AST},
{"America/Grand_Turk", EST},
{"America/Grenada", AST},
{"America/Guadeloupe", AST},
{"America/Guatemala", CST},
{"America/Guayaquil", new String[] {"Ecuador Time", "ECT",
- "Ecuador Summer Time", "ECST"}},
+ "Ecuador Summer Time", "ECST",
+ "Ecuador Time", "ECT"}},
{"America/Guyana", new String[] {"Guyana Time", "GYT",
- "Guyana Summer Time", "GYST"}},
+ "Guyana Summer Time", "GYST",
+ "Guyana Time", "GYT"}},
{"America/Havana", CUBA},
{"America/Hermosillo", MST},
{"America/Indiana/Indianapolis", EST},
@@ -382,9 +467,11 @@
{"America/Knox_IN", CST},
{"America/Kralendijk", AST},
{"America/La_Paz", new String[] {"Bolivia Time", "BOT",
- "Bolivia Summer Time", "BOST"}},
+ "Bolivia Summer Time", "BOST",
+ "Bolivia Time", "BOT"}},
{"America/Lima", new String[] {"Peru Time", "PET",
- "Peru Summer Time", "PEST"}},
+ "Peru Summer Time", "PEST",
+ "Peru Time", "PET"}},
{"America/Louisville", EST},
{"America/Lower_Princes", AST},
{"America/Maceio", BRT},
@@ -398,13 +485,16 @@
{"America/Menominee", CST},
{"America/Merida", CST},
{"America/Metlakatla", new String[] {"Metlakatla Standard Time", "MeST",
- "Metlakatla Daylight Time", "MeDT"}},
+ "Metlakatla Daylight Time", "MeDT",
+ "Metlakatla Time", "MeT"}},
{"America/Mexico_City", CST},
{"America/Miquelon", new String[] {"Pierre & Miquelon Standard Time", "PMST",
- "Pierre & Miquelon Daylight Time", "PMDT"}},
+ "Pierre & Miquelon Daylight Time", "PMDT",
+ "Pierre & Miquelon Time", "PMT"}},
{"America/Moncton", AST},
{"America/Montevideo", new String[] {"Uruguay Time", "UYT",
- "Uruguay Summer Time", "UYST"}},
+ "Uruguay Summer Time", "UYST",
+ "Uruguay Time", "UYT"}},
{"America/Monterrey", CST},
{"America/Montreal", EST},
{"America/Montserrat", AST},
@@ -419,7 +509,8 @@
{"America/Panama", EST},
{"America/Pangnirtung", EST},
{"America/Paramaribo", new String[] {"Suriname Time", "SRT",
- "Suriname Summer Time", "SRST"}},
+ "Suriname Summer Time", "SRST",
+ "Suriname Time", "SRT"}},
{"America/Port-au-Prince", EST},
{"America/Port_of_Spain", AST},
{"America/Porto_Acre", AMT},
@@ -459,113 +550,143 @@
{"America/Yellowknife", MST},
{"Antarctica/Casey", WST_AUS},
{"Antarctica/Davis", new String[] {"Davis Time", "DAVT",
- "Davis Summer Time", "DAVST"}},
+ "Davis Summer Time", "DAVST",
+ "Davis Time", "DAVT"}},
{"Antarctica/DumontDUrville", new String[] {"Dumont-d'Urville Time", "DDUT",
- "Dumont-d'Urville Summer Time", "DDUST"}},
+ "Dumont-d'Urville Summer Time", "DDUST",
+ "Dumont-d'Urville Time", "DDUT"}},
{"Antarctica/Macquarie", new String[] {"Macquarie Island Time", "MIST",
- "Macquarie Island Summer Time", "MIST"}},
+ "Macquarie Island Summer Time", "MIST",
+ "Macquarie Island Time", "MIST"}},
{"Antarctica/Mawson", new String[] {"Mawson Time", "MAWT",
- "Mawson Summer Time", "MAWST"}},
+ "Mawson Summer Time", "MAWST",
+ "Mawson Time", "MAWT"}},
{"Antarctica/McMurdo", NZST},
{"Antarctica/Palmer", CLT},
{"Antarctica/Rothera", new String[] {"Rothera Time", "ROTT",
- "Rothera Summer Time", "ROTST"}},
+ "Rothera Summer Time", "ROTST",
+ "Rothera Time", "ROTT"}},
{"Antarctica/South_Pole", NZST},
{"Antarctica/Syowa", new String[] {"Syowa Time", "SYOT",
- "Syowa Summer Time", "SYOST"}},
+ "Syowa Summer Time", "SYOST",
+ "Syowa Time", "SYOT"}},
{"Antarctica/Vostok", new String[] {"Vostok Time", "VOST",
- "Vostok Summer Time", "VOSST"}},
+ "Vostok Summer Time", "VOSST",
+ "Vostok Time", "VOST"}},
{"Arctic/Longyearbyen", CET},
{"Asia/Aden", ARAST},
{"Asia/Almaty", new String[] {"Alma-Ata Time", "ALMT",
- "Alma-Ata Summer Time", "ALMST"}},
+ "Alma-Ata Summer Time", "ALMST",
+ "Alma-Ata Time", "ALMT"}},
{"Asia/Amman", EET},
{"Asia/Anadyr", new String[] {"Anadyr Time", "ANAT",
- "Anadyr Summer Time", "ANAST"}},
+ "Anadyr Summer Time", "ANAST",
+ "Anadyr Time", "ANAT"}},
{"Asia/Aqtau", new String[] {"Aqtau Time", "AQTT",
- "Aqtau Summer Time", "AQTST"}},
+ "Aqtau Summer Time", "AQTST",
+ "Aqtau Time", "AQTT"}},
{"Asia/Aqtobe", new String[] {"Aqtobe Time", "AQTT",
- "Aqtobe Summer Time", "AQTST"}},
+ "Aqtobe Summer Time", "AQTST",
+ "Aqtobe Time", "AQTT"}},
{"Asia/Ashgabat", TMT},
{"Asia/Ashkhabad", TMT},
{"Asia/Baghdad", ARAST},
{"Asia/Bahrain", ARAST},
{"Asia/Baku", new String[] {"Azerbaijan Time", "AZT",
- "Azerbaijan Summer Time", "AZST"}},
+ "Azerbaijan Summer Time", "AZST",
+ "Azerbaijan Time", "AZT"}},
{"Asia/Bangkok", ICT},
{"Asia/Beirut", EET},
{"Asia/Bishkek", new String[] {"Kirgizstan Time", "KGT",
- "Kirgizstan Summer Time", "KGST"}},
+ "Kirgizstan Summer Time", "KGST",
+ "Kirgizstan Time", "KGT"}},
{"Asia/Brunei", new String[] {"Brunei Time", "BNT",
- "Brunei Summer Time", "BNST"}},
+ "Brunei Summer Time", "BNST",
+ "Brunei Time", "BNT"}},
{"Asia/Calcutta", IST},
{"Asia/Choibalsan", new String[] {"Choibalsan Time", "CHOT",
- "Choibalsan Summer Time", "CHOST"}},
+ "Choibalsan Summer Time", "CHOST",
+ "Choibalsan Time", "CHOT"}},
{"Asia/Chongqing", CTT},
{"Asia/Chungking", CTT},
{"Asia/Colombo", IST},
{"Asia/Dacca", BDT},
{"Asia/Dhaka", BDT},
{"Asia/Dili", new String[] {"Timor-Leste Time", "TLT",
- "Timor-Leste Summer Time", "TLST"}},
+ "Timor-Leste Summer Time", "TLST",
+ "Timor-Leste Time", "TLT"}},
{"Asia/Damascus", EET},
{"Asia/Dubai", GST},
{"Asia/Dushanbe", new String[] {"Tajikistan Time", "TJT",
- "Tajikistan Summer Time", "TJST"}},
+ "Tajikistan Summer Time", "TJST",
+ "Tajikistan Time", "TJT"}},
{"Asia/Gaza", EET},
{"Asia/Harbin", CTT},
{"Asia/Hebron", EET},
{"Asia/Ho_Chi_Minh", ICT},
{"Asia/Hong_Kong", HKT},
{"Asia/Hovd", new String[] {"Hovd Time", "HOVT",
- "Hovd Summer Time", "HOVST"}},
+ "Hovd Summer Time", "HOVST",
+ "Hovd Time", "HOVT"}},
{"Asia/Irkutsk", new String[] {"Irkutsk Time", "IRKT",
- "Irkutsk Summer Time", "IRKST"}},
+ "Irkutsk Summer Time", "IRKST",
+ "Irkutsk Time", "IRKT"}},
{"Asia/Istanbul", EET},
{"Asia/Jakarta", WIT},
{"Asia/Jayapura", new String[] {"East Indonesia Time", "EIT",
- "East Indonesia Summer Time", "EIST"}},
+ "East Indonesia Summer Time", "EIST",
+ "East Indonesia Time", "EIT"}},
{"Asia/Kabul", new String[] {"Afghanistan Time", "AFT",
- "Afghanistan Summer Time", "AFST"}},
+ "Afghanistan Summer Time", "AFST",
+ "Afghanistan Time", "AFT"}},
{"Asia/Kamchatka", new String[] {"Petropavlovsk-Kamchatski Time", "PETT",
- "Petropavlovsk-Kamchatski Summer Time", "PETST"}},
+ "Petropavlovsk-Kamchatski Summer Time", "PETST",
+ "Petropavlovsk-Kamchatski Time", "PETT"}},
{"Asia/Karachi", PKT},
{"Asia/Kashgar", CTT},
{"Asia/Kathmandu", NPT},
{"Asia/Katmandu", NPT},
{"Asia/Kolkata", IST},
{"Asia/Krasnoyarsk", new String[] {"Krasnoyarsk Time", "KRAT",
- "Krasnoyarsk Summer Time", "KRAST"}},
+ "Krasnoyarsk Summer Time", "KRAST",
+ "Krasnoyarsk Time", "KRAT"}},
{"Asia/Kuala_Lumpur", MYT},
{"Asia/Kuching", MYT},
{"Asia/Kuwait", ARAST},
{"Asia/Macao", CTT},
{"Asia/Macau", CTT},
{"Asia/Magadan", new String[] {"Magadan Time", "MAGT",
- "Magadan Summer Time", "MAGST"}},
+ "Magadan Summer Time", "MAGST",
+ "Magadan Time", "MAGT"}},
{"Asia/Makassar", CIT},
{"Asia/Manila", new String[] {"Philippines Time", "PHT",
- "Philippines Summer Time", "PHST"}},
+ "Philippines Summer Time", "PHST",
+ "Philippines Time", "PHT"}},
{"Asia/Muscat", GST},
{"Asia/Nicosia", EET},
{"Asia/Novokuznetsk", NOVT},
{"Asia/Novosibirsk", NOVT},
{"Asia/Oral", new String[] {"Oral Time", "ORAT",
- "Oral Summer Time", "ORAST"}},
+ "Oral Summer Time", "ORAST",
+ "Oral Time", "ORAT"}},
{"Asia/Omsk", new String[] {"Omsk Time", "OMST",
- "Omsk Summer Time", "OMSST"}},
+ "Omsk Summer Time", "OMSST",
+ "Omsk Time", "OMST"}},
{"Asia/Phnom_Penh", ICT},
{"Asia/Pontianak", WIT},
{"Asia/Pyongyang", KST},
{"Asia/Qatar", ARAST},
{"Asia/Qyzylorda", new String[] {"Qyzylorda Time", "QYZT",
- "Qyzylorda Summer Time", "QYZST"}},
+ "Qyzylorda Summer Time", "QYZST",
+ "Qyzylorda Time", "QYZT"}},
{"Asia/Rangoon", new String[] {"Myanmar Time", "MMT",
- "Myanmar Summer Time", "MMST"}},
+ "Myanmar Summer Time", "MMST",
+ "Myanmar Time", "MMT"}},
{"Asia/Riyadh", ARAST},
{"Asia/Saigon", ICT},
{"Asia/Sakhalin", new String[] {"Sakhalin Time", "SAKT",
- "Sakhalin Summer Time", "SAKST"}},
+ "Sakhalin Summer Time", "SAKST",
+ "Sakhalin Time", "SAKT"}},
{"Asia/Samarkand", UZT},
{"Asia/Seoul", KST},
{"Asia/Singapore", SGT},
@@ -573,7 +694,8 @@
{"Asia/Tel_Aviv", ISRAEL},
{"Asia/Tashkent", UZT},
{"Asia/Tbilisi", new String[] {"Georgia Time", "GET",
- "Georgia Summer Time", "GEST"}},
+ "Georgia Summer Time", "GEST",
+ "Georgia Time", "GET"}},
{"Asia/Tehran", IRT},
{"Asia/Thimbu", BTT},
{"Asia/Thimphu", BTT},
@@ -583,28 +705,35 @@
{"Asia/Urumqi", CTT},
{"Asia/Vientiane", ICT},
{"Asia/Vladivostok", new String[] {"Vladivostok Time", "VLAT",
- "Vladivostok Summer Time", "VLAST"}},
+ "Vladivostok Summer Time", "VLAST",
+ "Vladivostok Time", "VLAT"}},
{"Asia/Yakutsk", new String[] {"Yakutsk Time", "YAKT",
- "Yakutsk Summer Time", "YAKST"}},
+ "Yakutsk Summer Time", "YAKST",
+ "Yakutsk Time", "YAKT"}},
{"Asia/Yekaterinburg", new String[] {"Yekaterinburg Time", "YEKT",
- "Yekaterinburg Summer Time", "YEKST"}},
+ "Yekaterinburg Summer Time", "YEKST",
+ "Yekaterinburg Time", "YEKT"}},
{"Asia/Yerevan", ARMT},
{"Atlantic/Azores", new String[] {"Azores Time", "AZOT",
- "Azores Summer Time", "AZOST"}},
+ "Azores Summer Time", "AZOST",
+ "Azores Time", "AZOT"}},
{"Atlantic/Bermuda", AST},
{"Atlantic/Canary", WET},
{"Atlantic/Cape_Verde", new String[] {"Cape Verde Time", "CVT",
- "Cape Verde Summer Time", "CVST"}},
+ "Cape Verde Summer Time", "CVST",
+ "Cape Verde Time", "CVT"}},
{"Atlantic/Faeroe", WET},
{"Atlantic/Faroe", WET},
{"Atlantic/Jan_Mayen", CET},
{"Atlantic/Madeira", WET},
{"Atlantic/Reykjavik", GMT},
{"Atlantic/South_Georgia", new String[] {"South Georgia Standard Time", "GST",
- "South Georgia Daylight Time", "GDT"}},
+ "South Georgia Daylight Time", "GDT",
+ "South Georgia Time", "GT"}},
{"Atlantic/St_Helena", GMT},
{"Atlantic/Stanley", new String[] {"Falkland Is. Time", "FKT",
- "Falkland Is. Summer Time", "FKST"}},
+ "Falkland Is. Summer Time", "FKST",
+ "Falkland Is. Time", "FKT"}},
{"Australia/ACT", EST_NSW},
{"Australia/Adelaide", ADELAIDE},
{"Australia/Brisbane", BRISBANE},
@@ -613,7 +742,8 @@
{"Australia/Currie", EST_NSW},
{"Australia/Darwin", DARWIN},
{"Australia/Eucla", new String[] {"Central Western Standard Time (Australia)", "CWST",
- "Central Western Summer Time (Australia)", "CWST"}},
+ "Central Western Summer Time (Australia)", "CWST",
+ "Central Western Time (Australia)", "CWT"}},
{"Australia/Hobart", TASMANIA},
{"Australia/LHI", LORD_HOWE},
{"Australia/Lindeman", BRISBANE},
@@ -697,7 +827,8 @@
{"Europe/Riga", EET},
{"Europe/Rome", CET},
{"Europe/Samara", new String[] {"Samara Time", "SAMT",
- "Samara Summer Time", "SAMST"}},
+ "Samara Summer Time", "SAMST",
+ "Samara Time", "SAMT"}},
{"Europe/San_Marino", CET},
{"Europe/Sarajevo", CET},
{"Europe/Simferopol", EET},
@@ -713,7 +844,8 @@
{"Europe/Vienna", CET},
{"Europe/Vilnius", EET},
{"Europe/Volgograd", new String[] {"Volgograd Time", "VOLT",
- "Volgograd Summer Time", "VOLST"}},
+ "Volgograd Summer Time", "VOLST",
+ "Volgograd Time", "VOLT"}},
{"Europe/Warsaw", CET},
{"Europe/Zagreb", CET},
{"Europe/Zaporozhye", EET},
@@ -727,30 +859,39 @@
{"IST", IST},
{"Indian/Antananarivo", EAT},
{"Indian/Chagos", new String[] {"Indian Ocean Territory Time", "IOT",
- "Indian Ocean Territory Summer Time", "IOST"}},
+ "Indian Ocean Territory Summer Time", "IOST",
+ "Indian Ocean Territory Time", "IOT"}},
{"Indian/Christmas", new String[] {"Christmas Island Time", "CXT",
- "Christmas Island Summer Time", "CXST"}},
+ "Christmas Island Summer Time", "CXST",
+ "Christmas Island Time", "CIT"}},
{"Indian/Cocos", new String[] {"Cocos Islands Time", "CCT",
- "Cocos Islands Summer Time", "CCST"}},
+ "Cocos Islands Summer Time", "CCST",
+ "Cocos Islands Time", "CCT"}},
{"Indian/Comoro", EAT},
{"Indian/Kerguelen", new String[] {"French Southern & Antarctic Lands Time", "TFT",
- "French Southern & Antarctic Lands Summer Time", "TFST"}},
+ "French Southern & Antarctic Lands Summer Time", "TFST",
+ "French Southern & Antarctic Lands Time", "TFT"}},
{"Indian/Mahe", new String[] {"Seychelles Time", "SCT",
- "Seychelles Summer Time", "SCST"}},
+ "Seychelles Summer Time", "SCST",
+ "Seychelles Time", "SCT"}},
{"Indian/Maldives", new String[] {"Maldives Time", "MVT",
- "Maldives Summer Time", "MVST"}},
+ "Maldives Summer Time", "MVST",
+ "Maldives Time", "MVT"}},
{"Indian/Mauritius", new String[] {"Mauritius Time", "MUT",
- "Mauritius Summer Time", "MUST"}},
+ "Mauritius Summer Time", "MUST",
+ "Mauritius Time", "MUT"}},
{"Indian/Mayotte", EAT},
{"Indian/Reunion", new String[] {"Reunion Time", "RET",
- "Reunion Summer Time", "REST"}},
+ "Reunion Summer Time", "REST",
+ "Reunion Time", "RET"}},
{"Israel", ISRAEL},
{"Jamaica", EST},
{"Japan", JST},
{"Kwajalein", MHT},
{"Libya", EET},
{"MET", new String[] {"Middle Europe Time", "MET",
- "Middle Europe Summer Time", "MEST"}},
+ "Middle Europe Summer Time", "MEST",
+ "Middle Europe Time", "MET"}},
{"Mexico/BajaNorte", PST},
{"Mexico/BajaSur", MST},
{"Mexico/General", CST},
@@ -770,61 +911,82 @@
{"Pacific/Chuuk", CHUT},
{"Pacific/Easter", EASTER},
{"Pacific/Efate", new String[] {"Vanuatu Time", "VUT",
- "Vanuatu Summer Time", "VUST"}},
+ "Vanuatu Summer Time", "VUST",
+ "Vanuatu Time", "VUT"}},
{"Pacific/Enderbury", new String[] {"Phoenix Is. Time", "PHOT",
- "Phoenix Is. Summer Time", "PHOST"}},
+ "Phoenix Is. Summer Time", "PHOST",
+ "Phoenix Is. Time", "PHOT"}},
{"Pacific/Fakaofo", new String[] {"Tokelau Time", "TKT",
- "Tokelau Summer Time", "TKST"}},
+ "Tokelau Summer Time", "TKST",
+ "Tokelau Time", "TKT"}},
{"Pacific/Fiji", new String[] {"Fiji Time", "FJT",
- "Fiji Summer Time", "FJST"}},
+ "Fiji Summer Time", "FJST",
+ "Fiji Time", "FJT"}},
{"Pacific/Funafuti", new String[] {"Tuvalu Time", "TVT",
- "Tuvalu Summer Time", "TVST"}},
+ "Tuvalu Summer Time", "TVST",
+ "Tuvalu Time", "TVT"}},
{"Pacific/Galapagos", new String[] {"Galapagos Time", "GALT",
- "Galapagos Summer Time", "GALST"}},
+ "Galapagos Summer Time", "GALST",
+ "Galapagos Time", "GALT"}},
{"Pacific/Gambier", GAMBIER},
{"Pacific/Guadalcanal", SBT},
{"Pacific/Guam", ChST},
{"Pacific/Johnston", HST},
{"Pacific/Kiritimati", new String[] {"Line Is. Time", "LINT",
- "Line Is. Summer Time", "LINST"}},
+ "Line Is. Summer Time", "LINST",
+ "Line Is. Time", "LINT"}},
{"Pacific/Kosrae", new String[] {"Kosrae Time", "KOST",
- "Kosrae Summer Time", "KOSST"}},
+ "Kosrae Summer Time", "KOSST",
+ "Kosrae Time", "KOST"}},
{"Pacific/Kwajalein", MHT},
{"Pacific/Majuro", MHT},
{"Pacific/Marquesas", new String[] {"Marquesas Time", "MART",
- "Marquesas Summer Time", "MARST"}},
+ "Marquesas Summer Time", "MARST",
+ "Marquesas Time", "MART"}},
{"Pacific/Midway", SAMOA},
{"Pacific/Nauru", new String[] {"Nauru Time", "NRT",
- "Nauru Summer Time", "NRST"}},
+ "Nauru Summer Time", "NRST",
+ "Nauru Time", "NRT"}},
{"Pacific/Niue", new String[] {"Niue Time", "NUT",
- "Niue Summer Time", "NUST"}},
+ "Niue Summer Time", "NUST",
+ "Niue Time", "NUT"}},
{"Pacific/Norfolk", new String[] {"Norfolk Time", "NFT",
- "Norfolk Summer Time", "NFST"}},
+ "Norfolk Summer Time", "NFST",
+ "Norfolk Time", "NFT"}},
{"Pacific/Noumea", new String[] {"New Caledonia Time", "NCT",
- "New Caledonia Summer Time", "NCST"}},
+ "New Caledonia Summer Time", "NCST",
+ "New Caledonia Time", "NCT"}},
{"Pacific/Pago_Pago", SAMOA},
{"Pacific/Palau", new String[] {"Palau Time", "PWT",
- "Palau Summer Time", "PWST"}},
+ "Palau Summer Time", "PWST",
+ "Palau Time", "PWT"}},
{"Pacific/Pitcairn", PITCAIRN},
{"Pacific/Pohnpei", PONT},
{"Pacific/Ponape", PONT},
{"Pacific/Port_Moresby", new String[] {"Papua New Guinea Time", "PGT",
- "Papua New Guinea Summer Time", "PGST"}},
+ "Papua New Guinea Summer Time", "PGST",
+ "Papua New Guinea Time", "PGT"}},
{"Pacific/Rarotonga", new String[] {"Cook Is. Time", "CKT",
- "Cook Is. Summer Time", "CKHST"}},
+ "Cook Is. Summer Time", "CKHST",
+ "Cook Is. Time", "CKT"}},
{"Pacific/Saipan", ChST},
{"Pacific/Samoa", SAMOA},
{"Pacific/Tahiti", new String[] {"Tahiti Time", "TAHT",
- "Tahiti Summer Time", "TAHST"}},
+ "Tahiti Summer Time", "TAHST",
+ "Tahiti Time", "TAHT"}},
{"Pacific/Tarawa", new String[] {"Gilbert Is. Time", "GILT",
- "Gilbert Is. Summer Time", "GILST"}},
+ "Gilbert Is. Summer Time", "GILST",
+ "Gilbert Is. Time", "GILT"}},
{"Pacific/Tongatapu", new String[] {"Tonga Time", "TOT",
- "Tonga Summer Time", "TOST"}},
+ "Tonga Summer Time", "TOST",
+ "Tonga Time", "TOT"}},
{"Pacific/Truk", CHUT},
{"Pacific/Wake", new String[] {"Wake Time", "WAKT",
- "Wake Summer Time", "WAKST"}},
+ "Wake Summer Time", "WAKST",
+ "Wake Time", "WAKT"}},
{"Pacific/Wallis", new String[] {"Wallis & Futuna Time", "WFT",
- "Wallis & Futuna Summer Time", "WFST"}},
+ "Wallis & Futuna Summer Time", "WFST",
+ "Wallis & Futuna Time", "WFT"}},
{"Pacific/Yap", CHUT},
{"Poland", CET},
{"PRC", CTT},
--- a/jdk/src/share/classes/sun/util/resources/TimeZoneNamesBundle.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/classes/sun/util/resources/TimeZoneNamesBundle.java Mon Dec 17 08:30:06 2012 -0500
@@ -42,6 +42,9 @@
import java.util.Map;
import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.MissingResourceException;
+import java.util.Set;
/**
* Subclass of <code>ResourceBundle</code> with special
@@ -58,6 +61,26 @@
public abstract class TimeZoneNamesBundle extends OpenListResourceBundle {
/**
+ * Returns a String array containing time zone names. The String array has
+ * at most size elements.
+ *
+ * @param key the time zone ID for which names are obtained
+     * @param size the requested size of the array of names
+ * @return a String array containing names
+ */
+ public String[] getStringArray(String key, int size) {
+ String[] names = handleGetObject(key, size);
+ if ((names == null || names.length != size) && parent != null) {
+ names = ((TimeZoneNamesBundle)parent).getStringArray(key, size);
+ }
+ if (names == null) {
+ throw new MissingResourceException("no time zone names", getClass().getName(), key);
+ }
+ return names;
+
+ }
+
+ /**
* Maps time zone IDs to locale-specific names.
* The value returned is an array of five strings:
* <ul>
@@ -71,13 +94,17 @@
* <code>getContents</code> implementations, while the time zone
* ID is inserted into the returned array by this method.
*/
+ @Override
public Object handleGetObject(String key) {
+ return handleGetObject(key, 5);
+ }
+
+ private String[] handleGetObject(String key, int n) {
String[] contents = (String[]) super.handleGetObject(key);
if (contents == null) {
return null;
}
-
- int clen = contents.length;
+ int clen = Math.min(n, contents.length);
String[] tmpobj = new String[clen+1];
tmpobj[0] = key;
System.arraycopy(contents, 0, tmpobj, 1, clen);
@@ -85,14 +112,24 @@
}
/**
- * Use LinkedHashMap to preserve order of bundle entries.
+ * Use LinkedHashMap to preserve the order of bundle entries.
*/
@Override
- protected Map<String, Object> createMap(int size) {
+ protected <K, V> Map<K, V> createMap(int size) {
return new LinkedHashMap<>(size);
}
/**
+ * Use LinkedHashSet to preserve the key order.
+ * @param <E> the type of elements
+ * @return a Set
+ */
+ @Override
+ protected <E> Set<E> createSet() {
+ return new LinkedHashSet<>();
+ }
+
+ /**
* Provides key/value mappings for a specific
* resource bundle. Each entry of the array
* returned must be an array with two elements:
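The new getStringArray(String, int) above retries against the parent bundle chain whenever the current bundle cannot supply the requested number of strings, and handleGetObject prepends the zone ID at index 0 of whatever it returns. A hypothetical usage sketch, not part of the patch; it assumes a concrete, already-loaded bundle and reads the size argument as counting the zone ID plus the six display names:

import sun.util.resources.TimeZoneNamesBundle;

public class GetStringArraySketch {
    static String[] displayNamesFor(TimeZoneNamesBundle bundle, String zoneId) {
        // Falls back to the parent bundle when this locale has fewer names and
        // throws MissingResourceException when no bundle in the chain matches.
        String[] names = bundle.getStringArray(zoneId, 7);
        // names[0] is the zone ID prepended by handleGetObject;
        // names[1..6] are the localized display strings.
        return names;
    }
}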
--- a/jdk/src/share/javavm/export/classfile_constants.h Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/javavm/export/classfile_constants.h Mon Dec 17 08:30:06 2012 -0500
@@ -31,7 +31,7 @@
#endif
/* Classfile version number for this information */
-#define JVM_CLASSFILE_MAJOR_VERSION 51
+#define JVM_CLASSFILE_MAJOR_VERSION 52
#define JVM_CLASSFILE_MINOR_VERSION 0
/* Flags */
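The change above bumps JVM_CLASSFILE_MAJOR_VERSION to 52, the class-file format version that JDK 8 tools emit. The major version sits at a fixed offset in every class file, right after the magic number and the minor version, so it is easy to check; a minimal sketch, not part of the patch:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class ClassFileVersionSketch {
    public static void main(String[] args) throws IOException {
        // Reads the header of the class file named on the command line.
        try (DataInputStream in = new DataInputStream(new FileInputStream(args[0]))) {
            int magic = in.readInt();             // 0xCAFEBABE
            int minor = in.readUnsignedShort();   // JVM_CLASSFILE_MINOR_VERSION (0)
            int major = in.readUnsignedShort();   // 52 for classes compiled by JDK 8
            System.out.printf("magic=%08x version=%d.%d%n", magic, major, minor);
        }
    }
}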
--- a/jdk/src/share/native/java/net/Inet6Address.c Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/share/native/java/net/Inet6Address.c Mon Dec 17 08:30:06 2012 -0500
@@ -38,7 +38,6 @@
jfieldID ia6_cachedscopeidID;
jfieldID ia6_scopeidsetID;
jfieldID ia6_scopeifnameID;
-jfieldID ia6_scopeifnamesetID;
jmethodID ia6_ctrID;
/*
@@ -62,8 +61,6 @@
CHECK_NULL(ia6_scopeidID);
ia6_scopeifnameID = (*env)->GetFieldID(env, ia6_class, "scope_ifname", "Ljava/net/NetworkInterface;");
CHECK_NULL(ia6_scopeifnameID);
- ia6_scopeifnamesetID = (*env)->GetFieldID(env, ia6_class, "scope_ifname_set", "Z");
- CHECK_NULL(ia6_scopeifnamesetID);
ia6_ctrID = (*env)->GetMethodID(env, ia6_class, "<init>", "()V");
CHECK_NULL(ia6_ctrID);
}
--- a/jdk/src/solaris/classes/sun/awt/X11/XToolkit.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/solaris/classes/sun/awt/X11/XToolkit.java Mon Dec 17 08:30:06 2012 -0500
@@ -1332,6 +1332,15 @@
return new XInputMethodDescriptor();
}
+ /**
+ * Returns whether enableInputMethods should be set to true for peered
+ * TextComponent instances on this platform. True by default.
+ */
+ @Override
+ public boolean enableInputMethodsForTextComponent() {
+ return true;
+ }
+
static int getMultiClickTime() {
if (awt_multiclick_time == 0) {
initializeMultiClickTime();
--- a/jdk/src/solaris/native/sun/xawt/XlibWrapper.c Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/solaris/native/sun/xawt/XlibWrapper.c Mon Dec 17 08:30:06 2012 -0500
@@ -1260,13 +1260,15 @@
JavaVM* jvm = NULL;
static int ToolkitErrorHandler(Display * dpy, XErrorEvent * event) {
+ JNIEnv * env;
if (jvm != NULL) {
- JNIEnv * env = (JNIEnv *)JNU_GetEnv(jvm, JNI_VERSION_1_2);
- return JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XToolkit", "globalErrorHandler", "(JJ)I",
- ptr_to_jlong(dpy), ptr_to_jlong(event)).i;
- } else {
- return 0;
+ env = (JNIEnv *)JNU_GetEnv(jvm, JNI_VERSION_1_2);
+ if (env) {
+ return JNU_CallStaticMethodByName(env, NULL, "sun/awt/X11/XToolkit", "globalErrorHandler", "(JJ)I",
+ ptr_to_jlong(dpy), ptr_to_jlong(event)).i;
+ }
}
+ return 0;
}
/*
--- a/jdk/src/windows/classes/sun/nio/fs/WindowsFileAttributes.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/windows/classes/sun/nio/fs/WindowsFileAttributes.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -383,20 +383,14 @@
}
int volSerialNumber() {
- if (volSerialNumber == 0)
- throw new AssertionError("Should not get here");
return volSerialNumber;
}
int fileIndexHigh() {
- if (volSerialNumber == 0)
- throw new AssertionError("Should not get here");
return fileIndexHigh;
}
int fileIndexLow() {
- if (volSerialNumber == 0)
- throw new AssertionError("Should not get here");
return fileIndexLow;
}
--- a/jdk/src/windows/native/java/net/NetworkInterface.c Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/src/windows/native/java/net/NetworkInterface.c Mon Dec 17 08:30:06 2012 -0500
@@ -178,7 +178,7 @@
int count;
netif *netifP;
DWORD i;
- int lo=0, eth=0, tr=0, fddi=0, ppp=0, sl=0, wlan=0, net=0;
+ int lo=0, eth=0, tr=0, fddi=0, ppp=0, sl=0, wlan=0, net=0, wlen=0;
/*
* Ask the IP Helper library to enumerate the adapters
@@ -260,8 +260,17 @@
*/
curr = (netif *)calloc(1, sizeof(netif));
if (curr != NULL) {
+ wlen = MultiByteToWideChar(CP_OEMCP, 0, ifrowP->bDescr,
+ ifrowP->dwDescrLen, NULL, 0);
+ if(wlen == 0) {
+ // MultiByteToWideChar should not fail,
+ // but in the rare case that it does, fall back to displaying the narrow 'char' name
+ curr->displayName = (char *)malloc(ifrowP->dwDescrLen + 1);
+ } else {
+ curr->displayName = (wchar_t *)malloc(wlen*(sizeof(wchar_t))+1);
+ }
+
curr->name = (char *)malloc(strlen(dev_name) + 1);
- curr->displayName = (char *)malloc(ifrowP->dwDescrLen + 1);
if (curr->name == NULL || curr->displayName == NULL) {
if (curr->name) free(curr->name);
@@ -282,8 +291,29 @@
* 32-bit numbers as index values.
*/
strcpy(curr->name, dev_name);
- strncpy(curr->displayName, ifrowP->bDescr, ifrowP->dwDescrLen);
- curr->displayName[ifrowP->dwDescrLen] = '\0';
+ if (wlen == 0) {
+ // display char type in case of MultiByteToWideChar failure
+ strncpy(curr->displayName, ifrowP->bDescr, ifrowP->dwDescrLen);
+ curr->displayName[ifrowP->dwDescrLen] = '\0';
+ } else {
+ // call MultiByteToWideChar again to fill curr->displayName
+ // it should not fail, because we have called it once before
+ if (MultiByteToWideChar(CP_OEMCP, 0, ifrowP->bDescr,
+ ifrowP->dwDescrLen, curr->displayName, wlen) == 0) {
+ JNU_ThrowByName(env, "java/lang/Error",
+ "Cannot get multibyte char for interface display name");
+ free_netif(netifP);
+ free(tableP);
+ free(curr->name);
+ free(curr->displayName);
+ free(curr);
+ return -1;
+ } else {
+ curr->displayName[wlen*(sizeof(wchar_t))] = '\0';
+ curr->dNameIsUnicode = TRUE;
+ }
+ }
+
curr->dwIndex = ifrowP->dwIndex;
curr->ifType = ifrowP->dwType;
curr->index = GetFriendlyIfIndex(ifrowP->dwIndex);
--- a/jdk/test/Makefile Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/Makefile Mon Dec 17 08:30:06 2012 -0500
@@ -505,7 +505,7 @@
demo/jvmti demo/zipfs sample \
javax/naming com/sun/jndi \
javax/script \
- javax/sql \
+ java/sql javax/sql \
javax/smartcardio \
javax/xml/ws com/sun/internal/ws \
jdk/asm \
--- a/jdk/test/ProblemList.txt Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/ProblemList.txt Mon Dec 17 08:30:06 2012 -0500
@@ -263,10 +263,6 @@
# 7164518: no PortUnreachableException on Mac
sun/security/krb5/auto/Unreachable.java macosx-all
-# 7193792
-sun/security/pkcs11/ec/TestECDSA.java solaris-all
-sun/security/pkcs11/ec/TestECDSA.java linux-all
-
# 7193793
sun/security/pkcs11/ec/TestECDH.java linux-all
--- a/jdk/test/com/oracle/net/sanity.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/oracle/net/sanity.sh Mon Dec 17 08:30:06 2012 -0500
@@ -63,4 +63,4 @@
export CLASSPATH
# Run sanity test (IPv4-only for now)
-$JAVA -Djava.net.preferIPv4Stack=true Sanity ${IB_LINKS}
+$JAVA ${TESTVMOPTS} -Djava.net.preferIPv4Stack=true Sanity ${IB_LINKS}
--- a/jdk/test/com/sun/corba/cachedSocket/7056731.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/corba/cachedSocket/7056731.sh Mon Dec 17 08:30:06 2012 -0500
@@ -64,12 +64,12 @@
sleep 2 #give orbd time to start
echo "started orb"
echo "starting server"
-${TESTJAVA}${FS}bin${FS}java -cp . HelloServer -ORBInitialPort $PORT -ORBInitialHost localhost &
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp . HelloServer -ORBInitialPort $PORT -ORBInitialHost localhost &
SERVER_PROC=$!
sleep 2 #give server time to start
echo "started server"
echo "starting client (debug mode)"
-${TESTJAVA}${FS}bin${FS}java -cp . -agentlib:jdwp=transport=dt_socket,server=y,address=8000 HelloClient -ORBInitialPort $PORT -ORBInitialHost localhost > client.$$ 2>&1 &
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp . -agentlib:jdwp=transport=dt_socket,server=y,address=8000 HelloClient -ORBInitialPort $PORT -ORBInitialHost localhost > client.$$ 2>&1 &
JVM_PROC=$!
sleep 2 #give jvm/debugger/client time to start
--- a/jdk/test/com/sun/management/OperatingSystemMXBean/TestTotalSwap.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/management/OperatingSystemMXBean/TestTotalSwap.sh Mon Dec 17 08:30:06 2012 -0500
@@ -59,7 +59,7 @@
runOne()
{
echo "runOne $@"
- $TESTJAVA/bin/java -classpath $TESTCLASSES $@ || exit 3
+ $TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@ || exit 3
}
solaris_swap_size()
--- a/jdk/test/com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh Mon Dec 17 08:30:06 2012 -0500
@@ -43,7 +43,7 @@
{
echo "runOne $@"
$TESTJAVA/bin/javac -d $TESTCLASSES $TESTSRC/$@.java || exit 2
- $TESTJAVA/bin/java -classpath $TESTCLASSES $@ || exit 3
+ $TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@ || exit 3
}
# Test GetMaxFileDescriptorCount if we are running on Unix
--- a/jdk/test/com/sun/management/UnixOperatingSystemMXBean/GetOpenFileDescriptorCount.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/management/UnixOperatingSystemMXBean/GetOpenFileDescriptorCount.sh Mon Dec 17 08:30:06 2012 -0500
@@ -43,7 +43,7 @@
{
echo "runOne $@"
$TESTJAVA/bin/javac -d $TESTCLASSES $TESTSRC/$@.java || exit 2
- $TESTJAVA/bin/java -classpath $TESTCLASSES $@ || exit 3
+ $TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@ || exit 3
}
# Test GetOpenFileDescriptorCount if we are running on Unix
--- a/jdk/test/com/sun/tools/attach/ApplicationSetup.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/tools/attach/ApplicationSetup.sh Mon Dec 17 08:30:06 2012 -0500
@@ -41,7 +41,7 @@
startApplication()
{
# put all output from the app into ${OUTPUTFILE}
- ${JAVA} $1 $2 $3 -jar "${TESTCLASSES}"/Application.jar > ${OUTPUTFILE} 2>&1 &
+ ${JAVA} ${TESTVMOPTS} $1 $2 $3 -jar "${TESTCLASSES}"/Application.jar > ${OUTPUTFILE} 2>&1 &
pid="$!"
# MKS creates an intermediate shell to launch ${JAVA} so
@@ -78,6 +78,6 @@
stopApplication()
{
- $JAVA -classpath "${TESTCLASSES}" Shutdown $1
+ $JAVA ${TESTVMOPTS} -classpath "${TESTCLASSES}" Shutdown $1
}
--- a/jdk/test/com/sun/tools/attach/BasicTests.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/tools/attach/BasicTests.sh Mon Dec 17 08:30:06 2012 -0500
@@ -61,7 +61,7 @@
echo "Running tests ..."
-$JAVA -classpath "${TESTCLASSES}${PS}${TESTJAVA}/lib/tools.jar" \
+$JAVA ${TESTVMOPTS} -classpath "${TESTCLASSES}${PS}${TESTJAVA}/lib/tools.jar" \
BasicTests $pid $agent $badagent $redefineagent 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
--- a/jdk/test/com/sun/tools/attach/PermissionTests.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/tools/attach/PermissionTests.sh Mon Dec 17 08:30:06 2012 -0500
@@ -48,7 +48,7 @@
echo "Deny test"
# deny
-$JAVA -classpath "${TESTCLASSES}${PS}${TESTJAVA}/lib/tools.jar" \
+$JAVA ${TESTVMOPTS} -classpath "${TESTCLASSES}${PS}${TESTJAVA}/lib/tools.jar" \
-Djava.security.manager \
-Djava.security.policy=${TESTSRC}/java.policy.deny \
PermissionTest $pid true 2>&1
@@ -56,7 +56,7 @@
# allow
echo "Allow test"
-$JAVA -classpath "${TESTCLASSES}${PS}${TESTJAVA}/lib/tools.jar" \
+$JAVA ${TESTVMOPTS} -classpath "${TESTCLASSES}${PS}${TESTJAVA}/lib/tools.jar" \
-Djava.security.manager \
-Djava.security.policy=${TESTSRC}/java.policy.allow \
PermissionTest $pid false 2>&1
--- a/jdk/test/com/sun/tools/attach/ProviderTests.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/tools/attach/ProviderTests.sh Mon Dec 17 08:30:06 2012 -0500
@@ -45,7 +45,7 @@
echo "Running test ..."
-$JAVA -classpath \
+$JAVA ${TESTVMOPTS} -classpath \
"${TESTCLASSES}${PS}${TESTCLASSES}/SimpleProvider.jar${PS}${TESTJAVA}/lib/tools.jar" \
ProviderTest
--- a/jdk/test/com/sun/tools/extcheck/TestExtcheckArgs.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/com/sun/tools/extcheck/TestExtcheckArgs.sh Mon Dec 17 08:30:06 2012 -0500
@@ -39,7 +39,7 @@
exit $rc
fi
-${TESTJAVA}/bin/java -classpath ${TESTJAVA}/lib/tools.jar${PS}${TESTCLASSES} TestExtcheckArgs
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTJAVA}/lib/tools.jar${PS}${TESTCLASSES} TestExtcheckArgs
rc=$?
if [ $rc != 0 ]; then
echo Execution failure with exit status $rc
--- a/jdk/test/demo/zipfs/basic.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/demo/zipfs/basic.sh Mon Dec 17 08:30:06 2012 -0500
@@ -53,7 +53,7 @@
go() {
echo ""
- ${TESTJAVA}/bin/java $1 $2 $3 2>&1
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} $1 $2 $3 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/awt/Frame/FrameSetSizeStressTest/FrameSetSizeStressTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.awt.Frame;
+
+/*
+ @test
+ @bug 7177173
+ @summary setBounds can cause a StackOverflowError under considerable load
+ @author Sergey Bylokhov
+*/
+public final class FrameSetSizeStressTest {
+
+ public static void main(final String[] args) {
+ final Frame frame = new Frame();
+ frame.setSize(200, 200);
+ frame.setLocationRelativeTo(null);
+ frame.setVisible(true);
+ for (int i = 0; i < 1000; ++i) {
+ frame.setSize(100, 100);
+ frame.setSize(200, 200);
+ frame.setSize(300, 300);
+ }
+ frame.dispose();
+ }
+}
--- a/jdk/test/java/io/File/GetXSpace.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/File/GetXSpace.sh Mon Dec 17 08:30:06 2012 -0500
@@ -53,7 +53,7 @@
}
runTest() {
- ${TESTJAVA}/bin/java -cp ${TESTCLASSES} GetXSpace $*
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} -cp ${TESTCLASSES} GetXSpace $*
if [ $? -eq 0 ]
then echo "Passed"
else
--- a/jdk/test/java/io/File/MacPathTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/File/MacPathTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -36,4 +36,4 @@
TESTCLASSES=.
fi
-export LC_ALL=en_US.UTF-8 ;${TESTJAVA}/bin/java -cp ${TESTCLASSES} MacPathTest
+export LC_ALL=en_US.UTF-8 ;${TESTJAVA}/bin/java ${TESTVMOPTS} -cp ${TESTCLASSES} MacPathTest
--- a/jdk/test/java/io/File/basic.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/File/basic.sh Mon Dec 17 08:30:06 2012 -0500
@@ -44,7 +44,7 @@
;;
esac
mkdir x.Basic.dir
-if $TESTJAVA/bin/java $* -classpath "$TESTCLASSES" Basic; then
+if $TESTJAVA/bin/java ${TESTVMOPTS} $* -classpath "$TESTCLASSES" Basic; then
[ -f x.Basic.rw ] && (echo "x.Basic.rw not deleted"; exit 1)
([ -d x.Basic.dir ] || [ \! -d x.Basic.dir2 ]) \
&& (echo "x.Basic.dir not renamed"; exit 1)
--- a/jdk/test/java/io/FileOutputStream/FileOpen.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/FileOutputStream/FileOpen.sh Mon Dec 17 08:30:06 2012 -0500
@@ -50,20 +50,20 @@
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\FileOpenNeg.java
echo "Opening Writable Normal File.."
- ${TESTJAVA}/bin/java FileOpenPos ${hfile}
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} FileOpenPos ${hfile}
echo "Opening Writable Hidden File.."
${ATTRIB} +h ${hfile}
- ${TESTJAVA}/bin/java FileOpenNeg ${hfile}
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} FileOpenNeg ${hfile}
echo "Opening Read-Only Normal File.."
${ATTRIB} -h ${hfile}
${ATTRIB} +r ${hfile}
- ${TESTJAVA}/bin/java FileOpenNeg ${hfile}
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} FileOpenNeg ${hfile}
echo "Opening Read-Only Hidden File.."
${ATTRIB} +h ${hfile}
- ${TESTJAVA}/bin/java FileOpenNeg ${hfile}
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} FileOpenNeg ${hfile}
rm -f ${hfile}
exit
--- a/jdk/test/java/io/Serializable/class/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/class/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -45,85 +45,85 @@
rm -f A.java
cp ${TESTSRC}/NonSerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
-${TESTJAVA}/bin/java Test -d
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -d
echo
echo Write NonSerial1, Read NonSerial2
rm -f A.java
cp ${TESTSRC}/NonSerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/NonSerialA_2.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -d
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -d
echo
echo Write NonSerial1, Read Serial1
rm -f A.java
cp ${TESTSRC}/NonSerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/SerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -d
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -d
echo
echo Write Serial1, Read NonSerial1
rm -f A.java
cp ${TESTSRC}/SerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/NonSerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -doe
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -doe
echo
echo Write Serial1, Read Serial2
rm -f A.java
cp ${TESTSRC}/SerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/SerialA_2.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -d
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -d
echo
echo Write Serial2, Read Serial1
rm -f A.java
cp ${TESTSRC}/SerialA_2.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/SerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -d
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -d
echo
echo Write Serial1, Read Serial3
rm -f A.java
cp ${TESTSRC}/SerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/SerialA_3.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -de
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -de
echo
echo Write Serial3, Read Serial1
rm -f A.java
cp ${TESTSRC}/SerialA_3.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -s A
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -s A
rm -f A.java
cp ${TESTSRC}/SerialA_1.java A.java
${TESTJAVA}/bin/javac A.java
-${TESTJAVA}/bin/java Test -de
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test -de
echo
echo Passed
--- a/jdk/test/java/io/Serializable/evolution/AddedExternField/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/evolution/AddedExternField/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,7 @@
rm *.class tmp.ser
javac WriteAddedField.java
-java WriteAddedField
+java ${TESTVMOPTS} WriteAddedField
rm *.class
javac ReadAddedField.java
-java ReadAddedField
+java ${TESTVMOPTS} ReadAddedField
--- a/jdk/test/java/io/Serializable/evolution/RenamePackage/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/evolution/RenamePackage/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -86,13 +86,13 @@
# Run Case 1. Map test.SerialDriver within stream to install.SerialDriver.
CLASSPATH="${TESTCLASSES}/oclasses${PS}${TESTCLASSES}/share"; export CLASSPATH;
-${JAVA} test.SerialDriver -s
+${JAVA} ${TESTVMOPTS} test.SerialDriver -s
CLASSPATH="${TESTCLASSES}/nclasses${PS}${TESTCLASSES}/share"; export CLASSPATH;
-${JAVA} install.SerialDriver -d
+${JAVA} ${TESTVMOPTS} install.SerialDriver -d
rm stream.ser
# Run Case 2. Map install.SerialDriver within stream to test.SerialDriver.
CLASSPATH="${TESTCLASSES}/nclasses${PS}${TESTCLASSES}/share"; export CLASSPATH;
-${JAVA} install.SerialDriver -s
+${JAVA} ${TESTVMOPTS} install.SerialDriver -s
CLASSPATH="${TESTCLASSES}/oclasses${PS}${TESTCLASSES}/share"; export CLASSPATH;
-${JAVA} test.SerialDriver -d
+${JAVA} ${TESTVMOPTS} test.SerialDriver -d
--- a/jdk/test/java/io/Serializable/maskSyntheticModifier/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/maskSyntheticModifier/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -41,5 +41,5 @@
set -ex
cp ${TESTSRC}/Foo.class .
${TESTJAVA}/bin/javac -d . ${TESTSRC}/Test.java
-${TESTJAVA}/bin/java Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test
rm -f *.class
--- a/jdk/test/java/io/Serializable/packageAccess/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/packageAccess/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -45,5 +45,5 @@
${TESTJAVA}/bin/jar cf foo.jar B.class D.class
rm -f B.class D.class
-${TESTJAVA}/bin/java Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test
rm -f *.class *.jar
--- a/jdk/test/java/io/Serializable/resolveClass/consTest/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/resolveClass/consTest/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -44,5 +44,5 @@
${TESTJAVA}/bin/jar cf boot.jar *.class
rm -f *.class
${TESTJAVA}/bin/javac -classpath boot.jar -d . ${TESTSRC}/Test.java
-${TESTJAVA}/bin/java -Xbootclasspath/a:boot.jar Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Xbootclasspath/a:boot.jar Test
rm -f *.class *.jar
--- a/jdk/test/java/io/Serializable/resolveClass/deserializeButton/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/resolveClass/deserializeButton/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -46,5 +46,5 @@
${TESTJAVA}/bin/jar cf cb.jar *.class
rm -f *.class
${TESTJAVA}/bin/javac -d . ${TESTSRC}/Test.java
-${TESTJAVA}/bin/java Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test
rm -f *.class *.jar
--- a/jdk/test/java/io/Serializable/resolveProxyClass/NonPublicInterface.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/resolveProxyClass/NonPublicInterface.java Mon Dec 17 08:30:06 2012 -0500
@@ -22,14 +22,17 @@
*/
/* @test
- * @bug 4413817
+ * @bug 4413817 8004928
* @summary Verify that ObjectInputStream.resolveProxyClass can properly
* resolve a dynamic proxy class which implements a non-public
* interface not defined in the latest user defined class loader.
*/
import java.io.*;
-import java.lang.reflect.*;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.Proxy;
public class NonPublicInterface {
@@ -39,35 +42,19 @@
}
}
+ public static final String nonPublicIntrfaceName = "java.util.zip.ZipConstants";
+
public static void main(String[] args) throws Exception {
- Class nonPublic = null;
- String[] nonPublicInterfaces = new String[] {
- "java.awt.Conditional",
- "java.util.zip.ZipConstants",
- "javax.swing.GraphicsWrapper",
- "javax.swing.JPopupMenu$Popup",
- "javax.swing.JTable$Resizable2",
- "javax.swing.JTable$Resizable3",
- "javax.swing.ToolTipManager$Popup",
- "sun.audio.Format",
- "sun.audio.HaePlayable",
- "sun.tools.agent.StepConstants",
- };
- for (int i = 0; i < nonPublicInterfaces.length; i++) {
- try {
- nonPublic = Class.forName(nonPublicInterfaces[i]);
- break;
- } catch (ClassNotFoundException ex) {
- }
- }
- if (nonPublic == null) {
- throw new Error("couldn't find system non-public interface");
+ Class<?> nonPublic = Class.forName(nonPublicIntrfaceName);
+ if (Modifier.isPublic(nonPublic.getModifiers())) {
+ throw new Error("Interface " + nonPublicIntrfaceName +
+ " is public and needs to be changed!");
}
ByteArrayOutputStream bout = new ByteArrayOutputStream();
ObjectOutputStream oout = new ObjectOutputStream(bout);
oout.writeObject(Proxy.newProxyInstance(nonPublic.getClassLoader(),
- new Class[]{ nonPublic }, new Handler()));
+ new Class<?>[]{ nonPublic }, new Handler()));
oout.close();
ObjectInputStream oin = new ObjectInputStream(
new ByteArrayInputStream(bout.toByteArray()));
--- a/jdk/test/java/io/Serializable/subclass/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/subclass/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,6 @@
# @bug 4100915
mkdir classes
javac -d classes *.java
-java -classpath classes -Djava.policy=Allow.policy Test
+java ${TESTVMOPTS} -classpath classes -Djava.policy=Allow.policy Test
# ENABLE next line when new method for invoking a main with a SecureClassLoader is known
#java -classpath classes -Djava.policy=NotAllow.policy Test -expectSecurityException
--- a/jdk/test/java/io/Serializable/superclassDataLoss/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/superclassDataLoss/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -46,5 +46,5 @@
rm -f A.class B.class
${TESTJAVA}/bin/javac -d . ${TESTSRC}/Test.java
-${TESTJAVA}/bin/java Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test
rm -f *.class *.jar
--- a/jdk/test/java/io/Serializable/unnamedPackageSwitch/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/io/Serializable/unnamedPackageSwitch/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -41,4 +41,4 @@
set -ex
${TESTJAVA}/bin/javac -d . ${TESTSRC}/A.java ${TESTSRC}/Test.java
-${TESTJAVA}/bin/java Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test
--- a/jdk/test/java/lang/Class/forName/NonJavaNames.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/Class/forName/NonJavaNames.sh Mon Dec 17 08:30:06 2012 -0500
@@ -94,7 +94,7 @@
COPYSEMICOLON="cp ${TESTSRC}/classes/semicolon.class ${TESTCLASSES}/;.class"
$COPYSEMICOLON
-JAVA="$TESTJAVA/bin/java -classpath ${TESTSRC}/classes${SEP}${TESTCLASSES}"
+JAVA="$TESTJAVA/bin/java ${TESTVMOPTS} -classpath ${TESTSRC}/classes${SEP}${TESTCLASSES}"
$JAVA NonJavaNames
RESULT=$?
--- a/jdk/test/java/lang/ClassLoader/Assert.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/ClassLoader/Assert.sh Mon Dec 17 08:30:06 2012 -0500
@@ -49,7 +49,7 @@
${TESTJAVA}/bin/javac Assert.java
-${TESTJAVA}/bin/java Assert
+${TESTJAVA}/bin/java ${TESTVMOPTS} Assert
result=$?
if [ $result -eq 0 ]
--- a/jdk/test/java/lang/ClassLoader/deadlock/TestCrossDelegate.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/ClassLoader/deadlock/TestCrossDelegate.sh Mon Dec 17 08:30:06 2012 -0500
@@ -98,6 +98,7 @@
# run test
${TESTJAVA}${FS}bin${FS}java \
+ ${TESTVMOPTS} \
-verbose:class -XX:+TraceClassLoading -cp . \
-Dtest.classes=${TESTCLASSES} \
Starter cross
--- a/jdk/test/java/lang/ClassLoader/deadlock/TestOneWayDelegate.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/ClassLoader/deadlock/TestOneWayDelegate.sh Mon Dec 17 08:30:06 2012 -0500
@@ -93,6 +93,7 @@
# run test
${TESTJAVA}${FS}bin${FS}java \
+ ${TESTVMOPTS} \
-verbose:class -XX:+TraceClassLoading -cp . \
-Dtest.classes=${TESTCLASSES} \
Starter one-way
--- a/jdk/test/java/lang/ClassLoader/getdotresource.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/ClassLoader/getdotresource.sh Mon Dec 17 08:30:06 2012 -0500
@@ -33,5 +33,5 @@
if [ x"$TESTSRC" = x ]; then TESTSRC=.; fi
# now start the test
-${TESTJAVA}/bin/java -Djava.ext.dirs=$TESTSRC -cp $TESTCLASSES GetDotResource
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Djava.ext.dirs=$TESTSRC -cp $TESTCLASSES GetDotResource
--- a/jdk/test/java/lang/Runtime/exec/setcwd.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/Runtime/exec/setcwd.sh Mon Dec 17 08:30:06 2012 -0500
@@ -37,5 +37,5 @@
cp ${TESTCLASSES}/SetCwd.class foo
# now start the test
-${TESTJAVA}/bin/java SetCwd
+${TESTJAVA}/bin/java ${TESTVMOPTS} SetCwd
--- a/jdk/test/java/lang/StringCoding/CheckEncodings.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/StringCoding/CheckEncodings.sh Mon Dec 17 08:30:06 2012 -0500
@@ -49,7 +49,7 @@
echo "Testing:" ${1}
set LC_ALL="${1}"; export LC_ALL
locale
- ${TESTJAVA}/bin/java -version 2>&1
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} -version 2>&1
expectPass $?
}
--- a/jdk/test/java/lang/System/finalization/FinExit.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/System/finalization/FinExit.sh Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,7 @@
#
# We only want the first character, Windows might add CRLF
-x=`$TESTJAVA/bin/java -cp "$TESTCLASSES" FinExit | cut -c1`
+x=`$TESTJAVA/bin/java ${TESTVMOPTS} -cp "$TESTCLASSES" FinExit | cut -c1`
echo $x
if [ "x$x" != "x1" ]; then
echo On-exit finalizer invoked twice
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/lang/ThreadLocal/ThreadLocalSupplierTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import org.testng.annotations.Test;
+import static org.testng.Assert.*;
+
+/**
+ * @test
+ * @run testng ThreadLocalSupplierTest
+ * @summary tests ThreadLocal.withInitial(<Supplier>).
+ * Adapted from java.lang.Basic functional test of ThreadLocal
+ *
+ * @author Jim Gish <jim.gish@oracle.com>
+ */
+@Test
+public class ThreadLocalSupplierTest {
+
+ static final class IntegerSupplier implements Supplier<Integer> {
+
+ private final AtomicInteger supply = new AtomicInteger(0);
+
+ @Override
+ public Integer get() {
+ return supply.getAndIncrement();
+ }
+
+ public int numCalls() {
+ return supply.intValue();
+ }
+ }
+
+ static IntegerSupplier theSupply = new IntegerSupplier();
+
+ static final class MyThreadLocal extends ThreadLocal<Integer> {
+
+ private final ThreadLocal<Integer> delegate;
+
+ public volatile boolean everCalled;
+
+ public MyThreadLocal(Supplier<Integer> supplier) {
+ delegate = ThreadLocal.<Integer>withInitial(supplier);
+ }
+
+ @Override
+ public Integer get() {
+ return delegate.get();
+ }
+
+ @Override
+ protected synchronized Integer initialValue() {
+ // this should never be called since we are using the factory instead
+ everCalled = true;
+ return null;
+ }
+ }
+
+ /**
+ * Our one and only ThreadLocal from which we get thread ids using a
+ * supplier which simply increments a counter on each call of get().
+ */
+ static MyThreadLocal threadLocal = new MyThreadLocal(theSupply);
+
+ public void testMultiThread() throws Exception {
+ final int threadCount = 500;
+ final Thread th[] = new Thread[threadCount];
+ final boolean visited[] = new boolean[threadCount];
+
+ // Create and start the threads
+ for (int i = 0; i < threadCount; i++) {
+ th[i] = new Thread() {
+ @Override
+ public void run() {
+ final int threadId = threadLocal.get();
+ assertFalse(visited[threadId], "visited[" + threadId + "]=" + visited[threadId]);
+ visited[threadId] = true;
+ // check the get() again
+ final int secondCheckThreadId = threadLocal.get();
+ assertEquals(secondCheckThreadId, threadId);
+ }
+ };
+ th[i].start();
+ }
+
+ // Wait for the threads to finish
+ for (int i = 0; i < threadCount; i++) {
+ th[i].join();
+ }
+
+ assertEquals(theSupply.numCalls(), threadCount);
+ // make sure the provided initialValue() has not been called
+ assertFalse(threadLocal.everCalled);
+ // Check results
+ for (int i = 0; i < threadCount; i++) {
+ assertTrue(visited[i], "visited[" + i + "]=" + visited[i]);
+ }
+ }
+
+ public void testSimple() {
+ final String expected = "OneWithEverything";
+ final ThreadLocal<String> threadLocal = ThreadLocal.<String>withInitial(() -> expected);
+ assertEquals(expected, threadLocal.get());
+ }
+}
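The test added above exercises ThreadLocal.withInitial(Supplier), a factory method new in JDK 8 that supplies per-thread initial values without subclassing ThreadLocal. A minimal standalone sketch, not part of the patch, showing the same idiom the test wraps in MyThreadLocal:

import java.util.concurrent.atomic.AtomicInteger;

public class WithInitialSketch {
    static final AtomicInteger counter = new AtomicInteger();
    // Each thread's first get() pulls its own value from the supplier,
    // equivalent to overriding initialValue() by hand.
    static final ThreadLocal<Integer> ids =
            ThreadLocal.withInitial(counter::getAndIncrement);

    public static void main(String[] args) throws InterruptedException {
        Runnable r = () -> System.out.println(
                Thread.currentThread().getName() + " -> " + ids.get());
        Thread t1 = new Thread(r);
        Thread t2 = new Thread(r);
        t1.start(); t2.start();
        t1.join(); t2.join();
        System.out.println("main -> " + ids.get()); // a third, distinct id
    }
}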
--- a/jdk/test/java/lang/Throwable/LegacyChainedExceptionSerialization.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/Throwable/LegacyChainedExceptionSerialization.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
/**
* @test
- * @bug 4385429
+ * @bug 4385429 8004928
* @summary Certain legacy chained exceptions throw IllegalArgumentException
* upon deserialization if "causative exception" is null.
* @author Josh Bloch
@@ -36,8 +36,7 @@
new ExceptionInInitializerError(),
new java.lang.reflect.UndeclaredThrowableException(null),
new java.lang.reflect.InvocationTargetException(null),
- new java.security.PrivilegedActionException(null),
- new java.awt.print.PrinterIOException(null)
+ new java.security.PrivilegedActionException(null)
};
public static void main(String[] args) throws Exception {
--- a/jdk/test/java/lang/annotation/loaderLeak/LoaderLeak.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/annotation/loaderLeak/LoaderLeak.sh Mon Dec 17 08:30:06 2012 -0500
@@ -69,7 +69,7 @@
cp ${TESTSRC}${FS}*.java .
${TESTJAVA}${FS}bin${FS}javac -d classes A.java B.java C.java
${TESTJAVA}${FS}bin${FS}javac Main.java
-${TESTJAVA}${FS}bin${FS}java Main
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} Main
result=$?
if [ $result -eq 0 ]
then
@@ -78,7 +78,7 @@
echo "Failed 1 of 2"
exit $result
fi
-${TESTJAVA}${FS}bin${FS}java Main foo
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} Main foo
result=$?
if [ $result -eq 0 ]
then
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/lang/invoke/lambda/LambdaAccessControlDoPrivilegedTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8003881
+ * @summary tests doPrivileged actions (implemented as lambda expressions) by
+ * inserting them into the boot class path.
+ * @compile -XDignore.symbol.file LambdaAccessControlDoPrivilegedTest.java
+ * @run main/othervm LambdaAccessControlDoPrivilegedTest
+ */
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class LambdaAccessControlDoPrivilegedTest extends LUtils {
+ public static void main(String... args) {
+ final List<String> scratch = new ArrayList();
+ scratch.clear();
+ scratch.add("import java.security.*;");
+ scratch.add("public class DoPriv {");
+ scratch.add("public static void main(String... args) {");
+ scratch.add("String prop = AccessController.doPrivileged((PrivilegedAction<String>) () -> {");
+ scratch.add("return System.getProperty(\"user.home\");");
+ scratch.add("});");
+ scratch.add("}");
+ scratch.add("}");
+ File doprivJava = new File("DoPriv.java");
+ File doprivClass = getClassFile(doprivJava);
+ createFile(doprivJava, scratch);
+
+ scratch.clear();
+ scratch.add("public class Bar {");
+ scratch.add("public static void main(String... args) {");
+ scratch.add("System.out.println(\"sun.boot.class.path\" + \"=\" +");
+ scratch.add(" System.getProperty(\"sun.boot.class.path\", \"\"));");
+ scratch.add("System.setSecurityManager(new SecurityManager());");
+ scratch.add("DoPriv.main();");
+ scratch.add("}");
+ scratch.add("}");
+
+ File barJava = new File("Bar.java");
+ File barClass = getClassFile(barJava);
+ createFile(barJava, scratch);
+
+ String[] javacArgs = {barJava.getName(), doprivJava.getName()};
+ compile(javacArgs);
+ File jarFile = new File("foo.jar");
+ String[] jargs = {"cvf", jarFile.getName(), doprivClass.getName()};
+ jarTool.run(jargs);
+ doprivJava.delete();
+ doprivClass.delete();
+ TestResult tr = doExec(JAVA_CMD.getAbsolutePath(),
+ "-Xbootclasspath/p:foo.jar",
+ "-cp", ".", "Bar");
+ tr.assertZero("testDoPrivileged fails");
+ barJava.delete();
+ barClass.delete();
+ jarFile.delete();
+ }
+}
+
+/*
+ * support infrastructure to invoke a java class from the command line
+ */
+class LUtils {
+ static final sun.tools.jar.Main jarTool =
+ new sun.tools.jar.Main(System.out, System.err, "jar-tool");
+ static final com.sun.tools.javac.Main javac =
+ new com.sun.tools.javac.Main();
+ static final File cwd = new File(".").getAbsoluteFile();
+ static final String JAVAHOME = System.getProperty("java.home");
+ static final boolean isWindows =
+ System.getProperty("os.name", "unknown").startsWith("Windows");
+ //static final boolean isSDK = JAVAHOME.endsWith("jre");
+ static final File JAVA_BIN_FILE = new File(JAVAHOME, "bin");
+ static final File JAVA_CMD = new File(JAVA_BIN_FILE,
+ isWindows ? "java.exe" : "java");
+
+ protected LUtils() {
+ }
+
+ public static void compile(String... args) {
+ if (javac.compile(args) != 0) {
+ throw new RuntimeException("compilation fails");
+ }
+ }
+
+ static void createFile(File outFile, List<String> content) {
+ try {
+ Files.write(outFile.getAbsoluteFile().toPath(), content,
+ Charset.defaultCharset());
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ static File getClassFile(File javaFile) {
+ return javaFile.getName().endsWith(".java")
+ ? new File(javaFile.getName().replace(".java", ".class"))
+ : null;
+ }
+
+ static String getSimpleName(File inFile) {
+ String fname = inFile.getName();
+ return fname.substring(0, fname.indexOf("."));
+ }
+
+ static TestResult doExec(String... cmds) {
+ return doExec(null, null, cmds);
+ }
+
+ /*
+ * A method which executes a java cmd and returns the results in a container
+ */
+ static TestResult doExec(Map<String, String> envToSet,
+ java.util.Set<String> envToRemove, String... cmds) {
+ String cmdStr = "";
+ for (String x : cmds) {
+ cmdStr = cmdStr.concat(x + " ");
+ }
+ ProcessBuilder pb = new ProcessBuilder(cmds);
+ Map<String, String> env = pb.environment();
+ if (envToRemove != null) {
+ for (String key : envToRemove) {
+ env.remove(key);
+ }
+ }
+ if (envToSet != null) {
+ env.putAll(envToSet);
+ }
+ BufferedReader rdr = null;
+ try {
+ List<String> outputList = new ArrayList<>();
+ pb.redirectErrorStream(true);
+ Process p = pb.start();
+ rdr = new BufferedReader(new InputStreamReader(p.getInputStream()));
+ String in = rdr.readLine();
+ while (in != null) {
+ outputList.add(in);
+ in = rdr.readLine();
+ }
+ p.waitFor();
+ p.destroy();
+
+ return new TestResult(cmdStr, p.exitValue(), outputList,
+ env, new Throwable("current stack of the test"));
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ throw new RuntimeException(ex.getMessage());
+ }
+ }
+
+ static class TestResult {
+ String cmd;
+ int exitValue;
+ List<String> testOutput;
+ Map<String, String> env;
+ Throwable t;
+
+ public TestResult(String str, int rv, List<String> oList,
+ Map<String, String> env, Throwable t) {
+ cmd = str;
+ exitValue = rv;
+ testOutput = oList;
+ this.env = env;
+ this.t = t;
+ }
+
+ void assertZero(String message) {
+ if (exitValue != 0) {
+ System.err.println(this);
+ throw new RuntimeException(message);
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringWriter sw = new StringWriter();
+ PrintWriter status = new PrintWriter(sw);
+ status.println("Cmd: " + cmd);
+ status.println("Return code: " + exitValue);
+ status.println("Environment variable:");
+ for (String x : env.keySet()) {
+ status.println("\t" + x + "=" + env.get(x));
+ }
+ status.println("Output:");
+ for (String x : testOutput) {
+ status.println("\t" + x);
+ }
+ status.println("Exception:");
+ status.println(t.getMessage());
+ t.printStackTrace(status);
+
+ return sw.getBuffer().toString();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/lang/invoke/lambda/LambdaAccessControlTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8003881
+ * @summary tests lambda expressions with a security manager at top level
+ * @compile -XDignore.symbol.file LambdaAccessControlTest.java
+ *
+ * @run main/othervm LambdaAccessControlTest
+ */
+
+public class LambdaAccessControlTest extends LUtils {
+ public static void main(String... args) {
+ System.setSecurityManager(new SecurityManager());
+ JJ<Integer> iii = (new CC())::impl;
+ System.out.printf(">>> %s\n", iii.foo(44));
+ iii = DD::impl;
+ System.out.printf(">>> %s\n", iii.foo(44));
+ return;
+ }
+}
+/*
+ * support classes for the test
+ */
+interface II<T> { Object foo(T x); }
+interface JJ<R extends Number> extends II<R> { }
+class CC { String impl(int i) { return "impl:"+i; }}
+class DD { static String impl(int i) { return "impl:"+i; }}
--- a/jdk/test/java/lang/management/CompilationMXBean/Basic.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/management/CompilationMXBean/Basic.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,10 +23,10 @@
/*
* @test
- * @bug 5011189
+ * @bug 5011189 8004928
* @summary Unit test for java.lang.management.CompilationMXBean
*
- * @run main/othervm -Xcomp -Xbatch -Djava.awt.headless=true Basic
+ * @run main/othervm -Xcomp -Xbatch Basic
*/
import java.lang.management.*;
@@ -65,8 +65,6 @@
java.util.Locale.getAvailableLocales();
java.security.Security.getProviders();
- java.awt.Toolkit.getDefaultToolkit();
- javax.swing.UIManager.getInstalledLookAndFeels();
java.nio.channels.spi.SelectorProvider.provider();
time = mb.getTotalCompilationTime();
--- a/jdk/test/java/lang/management/OperatingSystemMXBean/TestSystemLoadAvg.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/management/OperatingSystemMXBean/TestSystemLoadAvg.sh Mon Dec 17 08:30:06 2012 -0500
@@ -52,7 +52,7 @@
runOne()
{
echo "$TESTJAVA/bin/java -classpath $TESTCLASSES $@"
- $TESTJAVA/bin/java -classpath $TESTCLASSES $@
+ $TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@
}
# Retry 5 times to be more resilent to system load fluctation.
--- a/jdk/test/java/lang/reflect/Generics/Probe.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/reflect/Generics/Probe.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 5003916 6704655 6873951 6476261
+ * @bug 5003916 6704655 6873951 6476261 8004928
* @summary Testing parsing of signatures attributes of nested classes
* @author Joseph D. Darcy
*/
@@ -52,8 +52,7 @@
"java.util.HashMap$ValueIterator",
"java.util.LinkedHashMap$EntryIterator",
"java.util.LinkedHashMap$KeyIterator",
- "java.util.LinkedHashMap$ValueIterator",
- "javax.swing.JComboBox$AccessibleJComboBox"})
+ "java.util.LinkedHashMap$ValueIterator"})
public class Probe {
public static void main (String... args) throws Throwable {
Classes classesAnnotation = (Probe.class).getAnnotation(Classes.class);
--- a/jdk/test/java/lang/reflect/Proxy/ClassRestrictions.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/lang/reflect/Proxy/ClassRestrictions.java Mon Dec 17 08:30:06 2012 -0500
@@ -22,7 +22,7 @@
*/
/* @test
- * @bug 4227192
+ * @bug 4227192 8004928
* @summary This is a test of the restrictions on the parameters that may
* be passed to the Proxy.getProxyClass method.
* @author Peter Jones
@@ -31,6 +31,7 @@
* @run main ClassRestrictions
*/
+import java.lang.reflect.Modifier;
import java.lang.reflect.Proxy;
import java.net.URLClassLoader;
@@ -48,6 +49,8 @@
void foo();
}
+ public static final String nonPublicIntrfaceName = "java.util.zip.ZipConstants";
+
public static void main(String[] args) {
System.err.println(
@@ -65,7 +68,7 @@
try {
interfaces = new Class<?>[] { Object.class };
proxyClass = Proxy.getProxyClass(loader, interfaces);
- throw new RuntimeException(
+ throw new Error(
"proxy class created with java.lang.Object as interface");
} catch (IllegalArgumentException e) {
e.printStackTrace();
@@ -75,7 +78,7 @@
try {
interfaces = new Class<?>[] { Integer.TYPE };
proxyClass = Proxy.getProxyClass(loader, interfaces);
- throw new RuntimeException(
+ throw new Error(
"proxy class created with int.class as interface");
} catch (IllegalArgumentException e) {
e.printStackTrace();
@@ -90,7 +93,7 @@
try {
interfaces = new Class<?>[] { Bar.class, Bar.class };
proxyClass = Proxy.getProxyClass(loader, interfaces);
- throw new RuntimeException(
+ throw new Error(
"proxy class created with repeated interfaces");
} catch (IllegalArgumentException e) {
e.printStackTrace();
@@ -109,7 +112,7 @@
try {
interfaces = new Class<?>[] { altBarClass };
proxyClass = Proxy.getProxyClass(loader, interfaces);
- throw new RuntimeException(
+ throw new Error(
"proxy class created with interface " +
"not visible to class loader");
} catch (IllegalArgumentException e) {
@@ -122,34 +125,16 @@
* All non-public interfaces must be in the same package.
*/
Class<?> nonPublic1 = Bashful.class;
- Class<?> nonPublic2 = null;
- String[] nonPublicInterfaces = new String[] {
- "java.awt.Conditional",
- "java.util.zip.ZipConstants",
- "javax.swing.GraphicsWrapper",
- "javax.swing.JPopupMenu$Popup",
- "javax.swing.JTable$Resizable2",
- "javax.swing.JTable$Resizable3",
- "javax.swing.ToolTipManager$Popup",
- "sun.audio.Format",
- "sun.audio.HaePlayable",
- "sun.tools.agent.StepConstants",
- };
- for (int i = 0; i < nonPublicInterfaces.length; i++) {
- try {
- nonPublic2 = Class.forName(nonPublicInterfaces[i]);
- break;
- } catch (ClassNotFoundException e) {
- }
- }
- if (nonPublic2 == null) {
- throw new RuntimeException(
- "no second non-public interface found for test");
+            Class<?> nonPublic2 = Class.forName(nonPublicInterfaceName);
+            if (Modifier.isPublic(nonPublic2.getModifiers())) {
+                throw new Error(
+                    "Interface " + nonPublicInterfaceName +
+                    " is public and needs to be changed!");
}
try {
interfaces = new Class<?>[] { nonPublic1, nonPublic2 };
proxyClass = Proxy.getProxyClass(loader, interfaces);
- throw new RuntimeException(
+ throw new Error(
"proxy class created with two non-public interfaces " +
"in different packages");
} catch (IllegalArgumentException e) {
@@ -165,7 +150,7 @@
try {
interfaces = new Class<?>[] { Bar.class, Baz.class };
proxyClass = Proxy.getProxyClass(loader, interfaces);
- throw new RuntimeException(
+ throw new Error(
"proxy class created with conflicting methods");
} catch (IllegalArgumentException e) {
e.printStackTrace();
@@ -178,10 +163,10 @@
*/
System.err.println("\nTEST PASSED");
- } catch (Exception e) {
+ } catch (Throwable e) {
System.err.println("\nTEST FAILED:");
e.printStackTrace();
- throw new RuntimeException("TEST FAILED: " + e.toString());
+ throw new Error("TEST FAILED: ", e);
}
}
}
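ClassRestrictions relies on two documented behaviours of java.lang.reflect.Proxy: getProxyClass rejects anything that is not an interface, and a proxy over a non-public interface is tied to that interface's package and is itself non-public. A minimal, self-contained sketch of both rules (class and interface names here are illustrative, not part of the test):

```java
import java.lang.reflect.Modifier;
import java.lang.reflect.Proxy;

public class ProxyRules {
    // package-private interface declared alongside the sketch
    interface Visible { void ping(); }

    public static void main(String[] args) {
        ClassLoader loader = ProxyRules.class.getClassLoader();

        // 1. Only interfaces may be proxied: Object.class is rejected.
        try {
            Proxy.getProxyClass(loader, Object.class);
            throw new AssertionError("expected IllegalArgumentException");
        } catch (IllegalArgumentException expected) {
            // thrown because Object is a class, not an interface
        }

        // 2. A proxy over a non-public interface is non-public and lives in
        //    that interface's package.
        Class<?> p = Proxy.getProxyClass(loader, Visible.class);
        System.out.println("proxy class: " + p.getName()
                + ", public? " + Modifier.isPublic(p.getModifiers()));
    }
}
```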
--- a/jdk/test/java/net/Authenticator/B4933582.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/Authenticator/B4933582.sh Mon Dec 17 08:30:06 2012 -0500
@@ -45,5 +45,5 @@
esac
${TESTJAVA}${FS}bin${FS}javac -d . -classpath "${TESTSRC}${FS}..${FS}..${FS}..${FS}sun${FS}net${FS}www${FS}httptest" ${TESTSRC}${FS}B4933582.java
rm -f cache.ser auth.save
-${TESTJAVA}${FS}bin${FS}java -classpath "${TESTSRC}${FS}..${FS}..${FS}..${FS}sun${FS}net${FS}www${FS}httptest${PS}." B4933582 first
-${TESTJAVA}${FS}bin${FS}java -classpath "${TESTSRC}${FS}..${FS}..${FS}..${FS}sun${FS}net${FS}www${FS}httptest${PS}." B4933582 second
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath "${TESTSRC}${FS}..${FS}..${FS}..${FS}sun${FS}net${FS}www${FS}httptest${PS}." B4933582 first
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath "${TESTSRC}${FS}..${FS}..${FS}..${FS}sun${FS}net${FS}www${FS}httptest${PS}." B4933582 second
--- a/jdk/test/java/net/CookieHandler/B6791927.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/CookieHandler/B6791927.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
try {
// Forces a non US locale
Locale.setDefault(Locale.FRANCE);
- List<HttpCookie> cookies = HttpCookie.parse("set-cookie: CUSTOMER=WILE_E_COYOTE; expires=Wednesday, 09-Nov-2019 23:12:40 GMT");
+ List<HttpCookie> cookies = HttpCookie.parse("set-cookie: CUSTOMER=WILE_E_COYOTE; expires=Sat, 09-Nov-2019 23:12:40 GMT");
if (cookies == null || cookies.isEmpty()) {
throw new RuntimeException("No cookie found");
}
--- a/jdk/test/java/net/CookieHandler/CookieManagerTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/CookieHandler/CookieManagerTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -126,7 +126,7 @@
testPolicies[count] = CookiePolicy.ACCEPT_ORIGINAL_SERVER;
testCases[count++] = new CookieTestCase[]{
new CookieTestCase("Set-Cookie",
- "CUSTOMER=WILE:BOB; path=/; expires=Wednesday, 09-Nov-2030 23:12:40 GMT;" + "domain=." + localHostAddr,
+ "CUSTOMER=WILE:BOB; path=/; expires=Sat, 09-Nov-2030 23:12:40 GMT;" + "domain=." + localHostAddr,
"CUSTOMER=WILE:BOB",
"/"
),
--- a/jdk/test/java/net/DatagramSocket/SetDatagramSocketImplFactory/ADatagramSocket.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/DatagramSocket/SetDatagramSocketImplFactory/ADatagramSocket.sh Mon Dec 17 08:30:06 2012 -0500
@@ -49,4 +49,4 @@
;;
esac
-${TESTJAVA}${FILESEP}bin${FILESEP}java -Xbootclasspath/p:${TESTCLASSES} ADatagramSocket true
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} -Xbootclasspath/p:${TESTCLASSES} ADatagramSocket true
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/net/HttpCookie/ExpiredCookieTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8000525
+ */
+
+import java.net.*;
+import java.util.*;
+import java.io.*;
+import java.text.*;
+
+public class ExpiredCookieTest {
+ // lifted from HttpCookie.java
+ private final static String[] COOKIE_DATE_FORMATS = {
+ "EEE',' dd-MMM-yy HH:mm:ss 'GMT'",
+ "EEE',' dd MMM yy HH:mm:ss 'GMT'",
+ "EEE MMM dd yy HH:mm:ss 'GMT'Z",
+ "EEE',' dd-MMM-yyyy HH:mm:ss 'GMT'",
+ "EEE',' dd MMM yyyy HH:mm:ss 'GMT'",
+ "EEE MMM dd yyyy HH:mm:ss 'GMT'Z"
+ };
+ static final TimeZone GMT = TimeZone.getTimeZone("GMT");
+
+ public static void main(String[] args) throws Exception {
+ Calendar cal = Calendar.getInstance(GMT);
+
+ for (int i = 0; i < COOKIE_DATE_FORMATS.length; i++) {
+ SimpleDateFormat df = new SimpleDateFormat(COOKIE_DATE_FORMATS[i],
+ Locale.US);
+ cal.set(1970, 0, 1, 0, 0, 0);
+ df.setTimeZone(GMT);
+ df.setLenient(false);
+ df.set2DigitYearStart(cal.getTime());
+ CookieManager cm = new CookieManager(
+ null, CookiePolicy.ACCEPT_ALL);
+ CookieHandler.setDefault(cm);
+ Map<String,List<String>> header = new HashMap<>();
+ List<String> values = new ArrayList<>();
+
+ cal.set(1970, 6, 9, 10, 10, 1);
+ StringBuilder datestring =
+ new StringBuilder(df.format(cal.getTime()));
+ values.add(
+ "TEST1=TEST1; Path=/; Expires=" + datestring.toString());
+
+ cal.set(1969, 6, 9, 10, 10, 2);
+ datestring = new StringBuilder(df.format(cal.getTime()));
+ values.add(
+ "TEST2=TEST2; Path=/; Expires=" + datestring.toString());
+
+ cal.set(2070, 6, 9, 10, 10, 3);
+ datestring = new StringBuilder(df.format(cal.getTime()));
+ values.add(
+ "TEST3=TEST3; Path=/; Expires=" + datestring.toString());
+
+ cal.set(2069, 6, 9, 10, 10, 4);
+ datestring = new StringBuilder(df.format(cal.getTime()));
+ values.add(
+ "TEST4=TEST4; Path=/; Expires=" + datestring.toString());
+
+ header.put("Set-Cookie", values);
+ cm.put(new URI("http://127.0.0.1/"), header);
+
+ CookieStore cookieJar = cm.getCookieStore();
+ List <HttpCookie> cookies = cookieJar.getCookies();
+ if (COOKIE_DATE_FORMATS[i].contains("yyyy")) {
+ if (cookies.size() != 2)
+ throw new RuntimeException(
+ "Incorrectly parsing a bad date");
+ } else if (cookies.size() != 1) {
+ throw new RuntimeException(
+ "Incorrectly parsing a bad date");
+ }
+ }
+ }
+}
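ExpiredCookieTest drives the cookie date parser through CookieManager; the same expiry logic is visible directly on HttpCookie, which turns a parsable Expires attribute into a max-age and reports it via hasExpired(). A small sketch, assuming it is run before the second date has passed (the header strings are examples only; the weekdays match the dates, which the strict, non-lenient parser expects):

```java
import java.net.HttpCookie;
import java.util.List;

public class CookieExpiry {
    public static void main(String[] args) {
        // An Expires date in the past: hasExpired() should report true.
        List<HttpCookie> past = HttpCookie.parse(
                "Set-Cookie: TEST1=TEST1; Path=/; Expires=Thu, 01-Jan-1970 00:00:01 GMT");
        System.out.println("past cookie expired:   " + past.get(0).hasExpired());

        // A far-future Expires keeps the cookie alive (date reused from the hunk above).
        List<HttpCookie> future = HttpCookie.parse(
                "Set-Cookie: TEST3=TEST3; Path=/; Expires=Sat, 09-Nov-2030 23:12:40 GMT");
        System.out.println("future cookie expired: " + future.get(0).hasExpired());
    }
}
```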
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/net/Inet6Address/StringScope.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8004675
+ * @summary Inet6Address.getHostAddress should use string scope
+ * identifier where available
+ */
+
+import java.net.*;
+import java.util.Enumeration;
+
+public class StringScope {
+
+ public static void main(String args[]) throws Exception {
+ Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces();
+ while (e.hasMoreElements()) {
+ NetworkInterface iface = e.nextElement();
+ Enumeration<InetAddress> iadrs = iface.getInetAddresses();
+ while (iadrs.hasMoreElements()) {
+ InetAddress iadr = iadrs.nextElement();
+ if (iadr instanceof Inet6Address) {
+ Inet6Address i6adr = (Inet6Address) iadr;
+ NetworkInterface nif = i6adr.getScopedInterface();
+ if (nif == null)
+ continue;
+
+ String nifName = nif.getName();
+ String i6adrHostAddress = i6adr.getHostAddress();
+ int index = i6adrHostAddress.indexOf('%');
+ String i6adrScopeName = i6adrHostAddress.substring(index+1);
+
+ if (!nifName.equals(i6adrScopeName))
+ throw new RuntimeException("Expected nifName ["
+ + nifName + "], to equal i6adrScopeName ["
+ + i6adrScopeName + "] ");
+ }
+ }
+ }
+ }
+}
+
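StringScope checks that Inet6Address.getHostAddress appends the interface name after '%' when the address carries a scoped interface. A sketch of building such an address by hand; the interface name "lo" is an assumption, and the construction may throw UnknownHostException on machines where that interface has no IPv6 address:

```java
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.NetworkInterface;

public class ScopedAddress {
    public static void main(String[] args) throws Exception {
        // Link-local address; its scope is meaningful only together with an interface.
        InetAddress base = InetAddress.getByName("fe80::1");
        NetworkInterface nif = NetworkInterface.getByName("lo"); // assumption: exists
        if (nif == null) {
            System.out.println("no such interface, skipping");
            return;
        }
        Inet6Address scoped = Inet6Address.getByAddress(null, base.getAddress(), nif);
        // With a string scope identifier available, the textual form is expected to
        // look like fe80:0:0:0:0:0:0:1%lo
        System.out.println(scoped.getHostAddress());
        System.out.println("scoped interface: " + scoped.getScopedInterface().getName());
    }
}
```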
--- a/jdk/test/java/net/InetAddress/ptr/lookup.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/InetAddress/ptr/lookup.sh Mon Dec 17 08:30:06 2012 -0500
@@ -53,13 +53,13 @@
# IPv4 reverse lookup
echo ''
-OUT1=`$JAVA -Djava.net.preferIPv4Stack=true Lookup -q=PTR $ADDR`
+OUT1=`$JAVA ${TESTVMOPTS} -Djava.net.preferIPv4Stack=true Lookup -q=PTR $ADDR`
echo "(IPv4) $ADDR --> $OUT1"
# reverse lookup (default)
echo ''
-OUT2=`$JAVA Lookup -q=PTR $ADDR`
+OUT2=`$JAVA ${TESTVMOPTS} Lookup -q=PTR $ADDR`
echo "(default) $ADDR --> $OUT2"
--- a/jdk/test/java/net/ServerSocket/AcceptCauseFileDescriptorLeak.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/ServerSocket/AcceptCauseFileDescriptorLeak.sh Mon Dec 17 08:30:06 2012 -0500
@@ -45,4 +45,4 @@
ulimit -n 1024
fi
-${TESTJAVA}/bin/java AcceptCauseFileDescriptorLeak
+${TESTJAVA}/bin/java ${TESTVMOPTS} AcceptCauseFileDescriptorLeak
--- a/jdk/test/java/net/Socket/OldSocketImpl.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/Socket/OldSocketImpl.sh Mon Dec 17 08:30:06 2012 -0500
@@ -50,7 +50,7 @@
# with 1.3 and in OldStyleImpl.jar
# run
-${TESTJAVA}${FS}bin${FS}java -cp ${TESTSRC}${FS}OldSocketImpl.jar OldSocketImpl
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp ${TESTSRC}${FS}OldSocketImpl.jar OldSocketImpl
result=$?
if [ "$result" -ne "0" ]; then
exit 1
--- a/jdk/test/java/net/Socks/SocksV4Test.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/Socks/SocksV4Test.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,20 +26,26 @@
* @bug 4727547
* @summary SocksSocketImpl throws NullPointerException
* @build SocksServer
- * @run main SocksV4Test
*/
import java.net.*;
public class SocksV4Test {
+
+ // An unresolvable host
+ static final String HOSTNAME = "doesnot.exist.invalid";
+
public static void main(String[] args) throws Exception {
+ // sanity before running the test
+ assertUnresolvableHost(HOSTNAME);
+
// Create a SOCKS V4 proxy
SocksServer srvr = new SocksServer(0, true);
srvr.start();
Proxy sp = new Proxy(Proxy.Type.SOCKS,
new InetSocketAddress("localhost", srvr.getPort()));
// Let's create an unresolved address
- InetSocketAddress ad = new InetSocketAddress("doesnt.exist.name", 1234);
+ InetSocketAddress ad = new InetSocketAddress(HOSTNAME, 1234);
try (Socket s = new Socket(sp)) {
s.connect(ad, 10000);
} catch (UnknownHostException ex) {
@@ -51,4 +57,15 @@
srvr.terminate();
}
}
+
+ static void assertUnresolvableHost(String host) {
+ InetAddress addr = null;
+ try {
+ addr = InetAddress.getByName(host);
+ } catch (UnknownHostException x) {
+ // OK, expected
+ }
+ if (addr != null)
+            throw new RuntimeException("Test cannot run: resolvable address: " + addr);
+ }
}
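SocksV4Test now verifies up front that its hostname really is unresolvable, relying on the reserved .invalid TLD. The same guard in isolation looks like this (hostname taken from the test above):

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;

public class UnresolvableCheck {
    public static void main(String[] args) {
        String host = "doesnot.exist.invalid";
        try {
            InetAddress addr = InetAddress.getByName(host);
            // A (mis)configured resolver that answers anyway makes the test meaningless.
            throw new RuntimeException("unexpectedly resolvable: " + addr);
        } catch (UnknownHostException expected) {
            System.out.println(host + " is unresolvable, as required");
        }
        // An InetSocketAddress built from such a name stays unresolved, which is
        // exactly what the SOCKS code path under test needs.
        InetSocketAddress isa = new InetSocketAddress(host, 1234);
        System.out.println("unresolved: " + isa.isUnresolved());
    }
}
```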
--- a/jdk/test/java/net/URL/B5086147.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URL/B5086147.sh Mon Dec 17 08:30:06 2012 -0500
@@ -47,7 +47,7 @@
failures=0
echo ''
-${TESTJAVA}${FS}bin${FS}java B5086147
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} B5086147
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
if [ "$failures" != "0" ]; then
--- a/jdk/test/java/net/URL/runconstructor.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URL/runconstructor.sh Mon Dec 17 08:30:06 2012 -0500
@@ -50,7 +50,7 @@
go() {
echo ''
- ${TESTJAVA}${FS}bin${FS}java Constructor $1
+ ${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} Constructor $1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
--- a/jdk/test/java/net/URLClassLoader/B5077773.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URLClassLoader/B5077773.sh Mon Dec 17 08:30:06 2012 -0500
@@ -61,5 +61,5 @@
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}B5077773.java
WD=`pwd`
-${TESTJAVA}${FS}bin${FS}java B5077773
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} B5077773
--- a/jdk/test/java/net/URLClassLoader/getresourceasstream/test.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URLClassLoader/getresourceasstream/test.sh Mon Dec 17 08:30:06 2012 -0500
@@ -46,11 +46,11 @@
${TESTJAVA}/bin/javac -d . ${TESTSRC}/Test.java
cp ${TESTSRC}/test.jar .
-${TESTJAVA}/bin/java Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} Test
checkExit
# try with security manager
-${TESTJAVA}/bin/java -Djava.security.policy=file:./policy -Djava.security.manager Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Djava.security.policy=file:./policy -Djava.security.manager Test
checkExit
exit 0
--- a/jdk/test/java/net/URLClassLoader/sealing/checksealed.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URLClassLoader/sealing/checksealed.sh Mon Dec 17 08:30:06 2012 -0500
@@ -56,7 +56,7 @@
CLASSPATH=".${PS}${TESTSRC}${FS}a${PS}${TESTSRC}${FS}b.jar"
${TESTJAVA}${FS}bin${FS}javac -classpath "${CLASSPATH}" -d . ${TESTSRC}${FS}CheckSealed.java
-${TESTJAVA}${FS}bin${FS}java -cp "${CLASSPATH}" CheckSealed 1
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp "${CLASSPATH}" CheckSealed 1
if [ $? != 0 ]; then exit 1; fi
-${TESTJAVA}${FS}bin${FS}java -cp "${CLASSPATH}" CheckSealed 2
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp "${CLASSPATH}" CheckSealed 2
if [ $? != 0 ]; then exit 1; fi
--- a/jdk/test/java/net/URLConnection/6212146/test.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URLConnection/6212146/test.sh Mon Dec 17 08:30:06 2012 -0500
@@ -67,7 +67,7 @@
WD=`pwd`
ulimit -H -n 300
-${TESTJAVA}${FS}bin${FS}java Test ${WD}/jars/ test.jar
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} Test ${WD}/jars/ test.jar
result=$?
rm -rf jars
exit $?
--- a/jdk/test/java/net/URLConnection/UNCTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/net/URLConnection/UNCTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -36,7 +36,7 @@
case "$OS" in
Windows_95 | Windows_98 | Windows_NT )
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\UNCTest.java
- ${TESTJAVA}/bin/java UNCTest ${UNC}
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} UNCTest ${UNC}
exit
;;
--- a/jdk/test/java/nio/Buffer/LimitDirectMemory.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/Buffer/LimitDirectMemory.sh Mon Dec 17 08:30:06 2012 -0500
@@ -34,7 +34,7 @@
runTest() {
echo "Testing: $*"
- ${TESTJAVA}/bin/java $*
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} $*
if [ $? -eq 0 ]
then echo "--- passed as expected"
else
@@ -43,10 +43,11 @@
fi
}
+
launchFail() {
echo "Testing: -XX:MaxDirectMemorySize=$* -cp ${TESTCLASSES} \
LimitDirectMemory true DEFAULT DEFAULT+1M"
- ${TESTJAVA}/bin/java -XX:MaxDirectMemorySize=$* -cp ${TESTCLASSES} \
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:MaxDirectMemorySize=$* -cp ${TESTCLASSES} \
LimitDirectMemory true DEFAULT DEFAULT+1M > ${TMP1} 2>&1
cat ${TMP1}
cat ${TMP1} | grep -s "Unrecognized VM option: \'MaxDirectMemorySize="
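LimitDirectMemory exercises the -XX:MaxDirectMemorySize bound on direct buffers. A sketch of the behaviour under test, assuming the JVM is launched with, for example, -XX:MaxDirectMemorySize=16m:

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class DirectLimit {
    public static void main(String[] args) {
        List<ByteBuffer> keep = new ArrayList<>();
        try {
            while (true) {
                keep.add(ByteBuffer.allocateDirect(1024 * 1024)); // 1 MB per buffer
            }
        } catch (OutOfMemoryError e) {
            // With -XX:MaxDirectMemorySize=16m this triggers after roughly 16 buffers.
            System.out.println("direct memory exhausted after "
                    + keep.size() + " MB: " + e.getMessage());
        }
    }
}
```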
--- a/jdk/test/java/nio/channels/AsynchronousChannelGroup/run_any_task.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/channels/AsynchronousChannelGroup/run_any_task.sh Mon Dec 17 08:30:06 2012 -0500
@@ -46,7 +46,7 @@
-C "${TESTCLASSES}" Attack.class
echo "Running test ..."
-$JAVA -XX:-UseVMInterruptibleIO \
- -Xbootclasspath/a:"${TESTCLASSES}/Privileged.jar" \
- -classpath "${TESTCLASSES}" \
- AsExecutor
+$JAVA ${TESTVMOPTS} \
+ -Xbootclasspath/a:"${TESTCLASSES}/Privileged.jar" \
+ -classpath "${TESTCLASSES}" \
+ AsExecutor
--- a/jdk/test/java/nio/channels/spi/AsynchronousChannelProvider/custom_provider.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/channels/spi/AsynchronousChannelProvider/custom_provider.sh Mon Dec 17 08:30:06 2012 -0500
@@ -51,7 +51,7 @@
go() {
echo ''
- $JAVA $1 $2 $3 2>&1
+ $JAVA ${TESTVMOPTS} $1 $2 $3 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
--- a/jdk/test/java/nio/charset/Charset/default.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/charset/Charset/default.sh Mon Dec 17 08:30:06 2012 -0500
@@ -64,7 +64,7 @@
ecs="$1"; shift
echo -n "$L: "
- cs="`LC_ALL=$L $JAVA -cp $TESTCLASSES Default`"
+ cs="`LC_ALL=$L $JAVA ${TESTVMOPTS} -cp $TESTCLASSES Default`"
if [ $? != 0 ]; then
exit 1
elif [ "`tolower $cs`" != "`tolower $ecs`" ]; then
--- a/jdk/test/java/nio/charset/coders/CheckSJISMappingProp.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/charset/coders/CheckSJISMappingProp.sh Mon Dec 17 08:30:06 2012 -0500
@@ -51,7 +51,7 @@
}
-JAVA="${TESTJAVA}/bin/java -cp ${TESTCLASSES}"
+JAVA="${TESTJAVA}/bin/java ${TESTVMOPTS} -cp ${TESTCLASSES}"
runTest() {
echo "Testing:" ${1}
LC_ALL="$1" ; export LC_ALL
--- a/jdk/test/java/nio/charset/spi/basic.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/charset/spi/basic.sh Mon Dec 17 08:30:06 2012 -0500
@@ -116,7 +116,7 @@
av="$av -Djava.security.manager
-Djava.security.policy==$TESTSRC/charsetProvider.sp";;
esac
- if (set -x; $JAVA $av Test $css) 2>&1; then
+ if (set -x; $JAVA ${TESTVMOPTS} $av Test $css) 2>&1; then
continue;
else
failures=`expr $failures + 1`
--- a/jdk/test/java/nio/file/Files/delete_on_close.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/file/Files/delete_on_close.sh Mon Dec 17 08:30:06 2012 -0500
@@ -51,7 +51,7 @@
TMPFILE="$$.tmp"
touch $TMPFILE
-$JAVA DeleteOnClose $TMPFILE 2>&1
+$JAVA ${TESTVMOPTS} DeleteOnClose $TMPFILE 2>&1
if [ $? != 0 ]; then exit 1; fi
if [ -f $TMPFILE ]; then
echo "$TMPFILE was not deleted"
--- a/jdk/test/java/nio/file/Files/walkFileTree/walk_file_tree.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/file/Files/walkFileTree/walk_file_tree.sh Mon Dec 17 08:30:06 2012 -0500
@@ -56,7 +56,7 @@
failures=0
# print the file tree and compare output with find(1)
-$JAVA PrintFileTree "$ROOT" > out1
+$JAVA ${TESTVMOPTS} PrintFileTree "$ROOT" > out1
find "$ROOT" > out2
diff out1 out2
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
@@ -66,26 +66,26 @@
# not. For that reason we run PrintFileTree with the -printCycles
# option when the output without this option differs to find(1).
find "$ROOT" -follow > out1
-$JAVA PrintFileTree -follow "$ROOT" > out2
+$JAVA ${TESTVMOPTS} PrintFileTree -follow "$ROOT" > out2
diff out1 out2
if [ $? != 0 ];
then
# re-run printing cycles to stdout
- $JAVA PrintFileTree -follow -printCycles "$ROOT" > out2
+ $JAVA ${TESTVMOPTS} PrintFileTree -follow -printCycles "$ROOT" > out2
diff out1 out2
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
fi
# test SKIP_SIBLINGS
-$JAVA SkipSiblings "$ROOT"
+$JAVA ${TESTVMOPTS} SkipSiblings "$ROOT"
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
# test TERMINATE
-$JAVA TerminateWalk "$ROOT"
+$JAVA ${TESTVMOPTS} TerminateWalk "$ROOT"
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
# test maxDepth
-$JAVA MaxDepth "$ROOT"
+$JAVA ${TESTVMOPTS} MaxDepth "$ROOT"
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
# clean-up
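walk_file_tree.sh compares the PrintFileTree helper's output against find(1); on the Java side that traversal is Files.walkFileTree. A minimal sketch that prints every directory and file under a root, roughly the lines find would produce (the root path is whatever the caller passes):

```java
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

public class PrintTree {
    public static void main(String[] args) throws IOException {
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
                System.out.println(dir);          // directory entry, like find
                return FileVisitResult.CONTINUE;
            }
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                System.out.println(file);         // regular file entry
                return FileVisitResult.CONTINUE;
            }
        });
    }
}
```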
--- a/jdk/test/java/nio/file/Path/MacPathTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/nio/file/Path/MacPathTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -36,4 +36,4 @@
TESTCLASSES=.
fi
-export LC_ALL=en_US.UTF-8; ${TESTJAVA}/bin/java -cp ${TESTCLASSES} MacPathTest
+export LC_ALL=en_US.UTF-8; ${TESTJAVA}/bin/java ${TESTVMOPTS} -cp ${TESTCLASSES} MacPathTest
--- a/jdk/test/java/rmi/MarshalledObject/compare/Compare.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/MarshalledObject/compare/Compare.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,7 +29,6 @@
* not involved in location should be compared.
* @author Ken Arnold
*
- * @build Compare
* @run main Compare 11 annotatedRef
*/
--- a/jdk/test/java/rmi/MarshalledObject/compare/HashCode.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/MarshalledObject/compare/HashCode.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,6 @@
* @summary MarshalledObject with null throws NullPointerException
* @author Ken Arnold
*
- * @build HashCode
* @run main HashCode 11 annotatedRef
*/
--- a/jdk/test/java/rmi/MarshalledObject/compare/NullReference.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/MarshalledObject/compare/NullReference.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,6 @@
* @summary MarshalledObject with null throws NullPointerException
* @author Ken Arnold
*
- * @build NullReference
* @run main NullReference
*/
--- a/jdk/test/java/rmi/Naming/DefaultRegistryPort.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/Naming/DefaultRegistryPort.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,6 @@
* @author Dana Burns
* @library ../testlibrary
* @build TestLibrary
- * @build DefaultRegistryPort
* @run main DefaultRegistryPort
*/
--- a/jdk/test/java/rmi/Naming/LookupIPv6.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/Naming/LookupIPv6.java Mon Dec 17 08:30:06 2012 -0500
@@ -22,12 +22,11 @@
*/
/* @test
+ * @summary Ensure that java.rmi.Naming.lookup can handle URLs containing
+ * IPv6 addresses.
* @bug 4402708
*
* @run main/othervm -Djava.net.preferIPv6Addresses=true LookupIPv6
- *
- * @summary Ensure that java.rmi.Naming.lookup can handle URLs containing
- * IPv6 addresses.
*/
import java.net.InetAddress;
--- a/jdk/test/java/rmi/Naming/RmiIsNoScheme.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/Naming/RmiIsNoScheme.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,6 @@
*
* @library ../testlibrary
* @build TestLibrary
- * @build RmiIsNoScheme
* @run main/othervm RmiIsNoScheme
*/
--- a/jdk/test/java/rmi/Naming/UnderscoreHost.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/Naming/UnderscoreHost.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,8 +29,7 @@
* @author Vinod Johnson
*
* @library ../testlibrary
- * @build TestLibrary
- * @build UnderscoreHost UnderscoreHost_Stub
+ * @build TestLibrary UnderscoreHost_Stub
* @run main/othervm UnderscoreHost
*/
--- a/jdk/test/java/rmi/Naming/legalRegistryNames/LegalRegistryNames.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/Naming/legalRegistryNames/LegalRegistryNames.java Mon Dec 17 08:30:06 2012 -0500
@@ -21,14 +21,13 @@
* questions.
*/
-/**
+/*
* @test
* @bug 4254808
* @summary Naming assumes '/' is present in relative URL; change in URL causes regression
* @author Dana Burns
* @library ../../testlibrary
- * @build TestLibrary
- * @build Legal LegalRegistryNames LegalRegistryNames_Stub
+ * @build TestLibrary Legal LegalRegistryNames_Stub
* @run main LegalRegistryNames
*/
--- a/jdk/test/java/rmi/RMISecurityManager/checkPackageAccess/CheckPackageAccess.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/RMISecurityManager/checkPackageAccess/CheckPackageAccess.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,10 +28,9 @@
* as when the default java.lang.SecurityManager is set, which with the
* default "java.security" file in the JDK means that access to packages in
* the sun.* package hierarchy is denied (without explicit runtime permission
- * "accessClassInPackge.*").
+ * "accessClassInPackage.*").
* @author Peter Jones
*
- * @build CheckPackageAccess
* @run main/othervm CheckPackageAccess
*/
--- a/jdk/test/java/rmi/activation/Activatable/checkActivateRef/CheckActivateRef.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/checkActivateRef/CheckActivateRef.java Mon Dec 17 08:30:06 2012 -0500
@@ -36,8 +36,7 @@
* functionality is in place
*
* @library ../../../testlibrary
- * @build TestLibrary RMID
- * @build ActivateMe CheckActivateRef_Stub CheckActivateRef
+ * @build TestLibrary RMID ActivateMe CheckActivateRef_Stub
* @run main/othervm/policy=security.policy/timeout=240 -Djava.rmi.server.ignoreStubClasses=true CheckActivateRef
* @run main/othervm/policy=security.policy/timeout=240 -Djava.rmi.server.ignoreStubClasses=false CheckActivateRef
*/
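Most of the rmi test-header hunks below only consolidate jtreg tags: several one-class @build lines become a single @build whose argument list continues on the next comment line, which jtreg treats the same way. An illustrative header only (tag values are made up, not taken from any real test):

```java
/*
 * Illustrative jtreg header, not a real test: one @build tag whose argument
 * list continues on the following comment line is equivalent to a series of
 * single-class @build tags.
 *
 * @test
 * @summary example of a consolidated @build tag
 * @library ../../../testlibrary
 * @build TestLibrary RMID ActivateMe
 *        CheckActivateRef_Stub
 * @run main/othervm/policy=security.policy CheckActivateRef
 */
```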
--- a/jdk/test/java/rmi/activation/Activatable/checkAnnotations/CheckAnnotations.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/checkAnnotations/CheckAnnotations.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,10 +28,7 @@
* @author Laird Dornin; code borrowed from Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build MyRMI
- * @build CheckAnnotations
- * @build CheckAnnotations_Stub
+ * @build TestLibrary RMID MyRMI CheckAnnotations_Stub
* @run main/othervm/policy=security.policy/timeout=480 CheckAnnotations
*/
--- a/jdk/test/java/rmi/activation/Activatable/checkImplClassLoader/CheckImplClassLoader.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/checkImplClassLoader/CheckImplClassLoader.java Mon Dec 17 08:30:06 2012 -0500
@@ -24,14 +24,11 @@
/* @test
* @bug 4289544
* @summary ActivationGroupImpl.newInstance does not set context classloader for impl
- *
* @author Laird Dornin; code borrowed from Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build MyRMI
- * @build CheckImplClassLoader ActivatableImpl
- * @build ActivatableImpl ActivatableImpl_Stub
+ * @build TestLibrary RMID
+ *        MyRMI ActivatableImpl ActivatableImpl_Stub
* @run main/othervm/policy=security.policy/timeout=150 CheckImplClassLoader
*/
--- a/jdk/test/java/rmi/activation/Activatable/checkRegisterInLog/CheckRegisterInLog.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/checkRegisterInLog/CheckRegisterInLog.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,8 +27,8 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build RMID ActivationLibrary TestLibrary
- * @build ActivateMe CheckRegisterInLog CheckRegisterInLog_Stub
+ * @build TestLibrary RMID ActivationLibrary
+ * ActivateMe CheckRegisterInLog_Stub
* @run main/othervm/policy=security.policy/timeout=240 CheckRegisterInLog
*/
--- a/jdk/test/java/rmi/activation/Activatable/createPrivateActivable/CreatePrivateActivatable.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/createPrivateActivable/CreatePrivateActivatable.java Mon Dec 17 08:30:06 2012 -0500
@@ -22,14 +22,12 @@
*/
/* @test
- * @author Laird Dornin
* @bug 4164971
* @summary allow non-public activatable class and/or constructor
+ * @author Laird Dornin
*
* @library ../../../testlibrary
- * @build TestLibrary RMID
- * @build ActivateMe
- * @build CreatePrivateActivatable
+ * @build TestLibrary RMID ActivateMe
* @run main/othervm/policy=security.policy/timeout=240 CreatePrivateActivatable
*/
--- a/jdk/test/java/rmi/activation/Activatable/downloadParameterClass/DownloadParameterClass.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/downloadParameterClass/DownloadParameterClass.java Mon Dec 17 08:30:06 2012 -0500
@@ -32,11 +32,7 @@
*
* @library ../../../testlibrary
* @build TestLibrary RMID ActivationLibrary
- * @build DownloadParameterClass
- * @build Foo
- * @build FooReceiverImpl
- * @build FooReceiverImpl_Stub
- * @build Bar
+ * Foo FooReceiverImpl FooReceiverImpl_Stub Bar
* @run main/othervm/policy=security.policy/timeout=240 DownloadParameterClass
*/
--- a/jdk/test/java/rmi/activation/Activatable/elucidateNoSuchMethod/ElucidateNoSuchMethod.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/elucidateNoSuchMethod/ElucidateNoSuchMethod.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,14 +23,11 @@
/* @test
* @bug 4128620
- *
* @summary synopsis: NoSuchMethodError should be elucidated
- *
* @author Laird Dornin
*
* @library ../../../testlibrary
- * @build TestLibrary RMID
- * @build ActivateMe ElucidateNoSuchMethod ElucidateNoSuchMethod_Stub
+ * @build TestLibrary RMID ActivateMe ElucidateNoSuchMethod_Stub
* @run main/othervm/policy=security.policy/timeout=240 ElucidateNoSuchMethod
*/
--- a/jdk/test/java/rmi/activation/Activatable/extLoadedImpl/ext.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/extLoadedImpl/ext.sh Mon Dec 17 08:30:06 2012 -0500
@@ -51,5 +51,5 @@
mkdir -p ext
$TESTJAVA/bin/jar cf ext/ext.jar -C $TESTCLASSES ExtLoadedImpl.class -C $TESTCLASSES ExtLoadedImpl_Stub.class -C $TESTCLASSES CheckLoader.class
-$TESTJAVA/bin/java -cp classes -Dtest.src=$TESTSRC -Dtest.classes=$TESTCLASSES -Djava.security.policy=$TESTSRC/security.policy -Djava.ext.dirs=ext ExtLoadedImplTest
+$TESTJAVA/bin/java ${TESTVMOPTS} -cp classes -Dtest.src=$TESTSRC -Dtest.classes=$TESTCLASSES -Djava.security.policy=$TESTSRC/security.policy -Djava.ext.dirs=ext ExtLoadedImplTest
--- a/jdk/test/java/rmi/activation/Activatable/forceLogSnapshot/ForceLogSnapshot.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/forceLogSnapshot/ForceLogSnapshot.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,9 +27,8 @@
* @author Laird Dornin
*
* @library ../../../testlibrary
- * @build ActivateMe
- * @build ForceLogSnapshot
- * @build ForceLogSnapshot_Stub
+ * @build TestLibrary RMID ActivationLibrary
+ * ActivateMe ForceLogSnapshot_Stub
* @run main/othervm/policy=security.policy/timeout=640 ForceLogSnapshot
*/
--- a/jdk/test/java/rmi/activation/Activatable/inactiveGroup/InactiveGroup.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/inactiveGroup/InactiveGroup.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,10 +29,7 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID ActivationLibrary
- * @build ActivateMe
- * @build InactiveGroup
- * @build InactiveGroup_Stub
+ * @build TestLibrary RMID ActivationLibrary ActivateMe InactiveGroup_Stub
* @run main/othervm/policy=security.policy/timeout=240 InactiveGroup
*/
--- a/jdk/test/java/rmi/activation/Activatable/lookupActivationSystem/LookupActivationSystem.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/lookupActivationSystem/LookupActivationSystem.java Mon Dec 17 08:30:06 2012 -0500
@@ -24,14 +24,12 @@
/*
* @test
* @bug 6245733
- *
* @summary synopsis: rmid's registry's list operation doesn't include
* activation system
* @author Ann Wollrath
*
* @library ../../../testlibrary
* @build TestLibrary RMID ActivationLibrary
- * @build LookupActivationSystem
* @run main/othervm/timeout=240 LookupActivationSystem
*/
--- a/jdk/test/java/rmi/activation/Activatable/nestedActivate/NestedActivate.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/nestedActivate/NestedActivate.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,15 +23,11 @@
/* @test
* @bug 4138056
- *
* @summary synopsis: Activating objects from an Activatable constructor causes deadlock
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID ActivationLibrary
- * @build ActivateMe
- * @build NestedActivate
- * @build NestedActivate_Stub
+ * @build TestLibrary RMID ActivationLibrary ActivateMe NestedActivate_Stub
* @run main/othervm/policy=security.policy/timeout=240 NestedActivate
*/
--- a/jdk/test/java/rmi/activation/Activatable/nonExistentActivatable/NonExistentActivatable.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/nonExistentActivatable/NonExistentActivatable.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,16 +23,13 @@
/* @test
* @bug 4115296
- *
* @summary synopsis: NoSuchObjectException not thrown for non-existent
* activatable objects
* @author Ann Wollrath
*
* @library ../../../testlibrary
* @build TestLibrary RMID ActivationLibrary
- * @build ActivateMe
- * @build NonExistentActivatable
- * @build NonExistentActivatable_Stub
+ * ActivateMe NonExistentActivatable_Stub
* @run main/othervm/policy=security.policy/timeout=240 NonExistentActivatable
*/
--- a/jdk/test/java/rmi/activation/Activatable/restartCrashedService/RestartCrashedService.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/restartCrashedService/RestartCrashedService.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,10 +28,7 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build ActivateMe
- * @build RestartCrashedService
- * @build RestartCrashedService_Stub
+ * @build TestLibrary RMID ActivateMe RestartCrashedService_Stub
* @run main/othervm/policy=security.policy/timeout=240 RestartCrashedService
*/
--- a/jdk/test/java/rmi/activation/Activatable/restartLatecomer/RestartLatecomer.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/restartLatecomer/RestartLatecomer.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,8 +28,7 @@
*
* @library ../../../testlibrary
* @build TestLibrary RMID ActivationLibrary
- * @build RestartLatecomer
- * @build RestartLatecomer_Stub
+ * RestartLatecomer RestartLatecomer_Stub
* @run main/othervm/policy=security.policy/timeout=240 RestartLatecomer
*/
--- a/jdk/test/java/rmi/activation/Activatable/restartService/RestartService.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/restartService/RestartService.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,15 +23,11 @@
/* @test
* @bug 4095165 4321151
-
* @summary synopsis: activator should restart daemon services
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID ActivationLibrary
- * @build ActivateMe
- * @build RestartService
- * @build RestartService_Stub
+ * @build TestLibrary RMID ActivationLibrary ActivateMe RestartService_Stub
* @run main/othervm/policy=security.policy/timeout=240 RestartService
*/
--- a/jdk/test/java/rmi/activation/Activatable/shutdownGracefully/ShutdownGracefully.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/shutdownGracefully/ShutdownGracefully.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,11 +28,8 @@
* @author Laird Dornin; code borrowed from Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build TestSecurityManager
- * @build RegisteringActivatable
- * @build ShutdownGracefully
- * @build ShutdownGracefully_Stub
+ * @build TestLibrary RMID
+ * TestSecurityManager RegisteringActivatable ShutdownGracefully_Stub
* @run main/othervm/policy=security.policy/timeout=700 ShutdownGracefully
*/
--- a/jdk/test/java/rmi/activation/Activatable/unregisterInactive/UnregisterInactive.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/Activatable/unregisterInactive/UnregisterInactive.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,16 +23,12 @@
/* @test
* @bug 4115331
-
* @summary synopsis: activatable object fails to go inactive after
* unregister/inactive sequence.
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID ActivationLibrary
- * @build ActivateMe
- * @build UnregisterInactive
- * @build UnregisterInactive_Stub
+ * @build TestLibrary RMID ActivationLibrary ActivateMe UnregisterInactive_Stub
* @run main/othervm/policy=security.policy/timeout=240 UnregisterInactive
*/
--- a/jdk/test/java/rmi/activation/ActivateFailedException/activateFails/ActivateFails.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivateFailedException/activateFails/ActivateFails.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,11 +31,8 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build ActivateMe
- * @build ActivateFails
- * @build ActivateFails_Stub
- * @build ShutdownThread
+ * @build TestLibrary RMID ActivationLibrary
+ * ActivateMe ActivateFails_Stub ShutdownThread
* @run main/othervm/policy=security.policy/timeout=240 ActivateFails
*/
--- a/jdk/test/java/rmi/activation/ActivationGroup/downloadActivationGroup/DownloadActivationGroup.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivationGroup/downloadActivationGroup/DownloadActivationGroup.java Mon Dec 17 08:30:06 2012 -0500
@@ -33,9 +33,7 @@
*
* @library ../../../testlibrary
* @build TestLibrary RMID ActivationLibrary
- * @build MyActivationGroupImpl
- * @build DownloadActivationGroup
- * @build DownloadActivationGroup_Stub
+ * DownloadActivationGroup MyActivationGroupImpl DownloadActivationGroup_Stub
* @run main/othervm/policy=security.policy/timeout=240 DownloadActivationGroup
*/
--- a/jdk/test/java/rmi/activation/ActivationGroupDesc/checkDefaultGroupName/CheckDefaultGroupName.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivationGroupDesc/checkDefaultGroupName/CheckDefaultGroupName.java Mon Dec 17 08:30:06 2012 -0500
@@ -21,16 +21,15 @@
* questions.
*/
-/**
+/*
* @test
* @bug 4252236
* @summary ActivationGroupDesc should not do early binding of default classname
- * @library ../../../testlibrary
+ * @author Laird Dornin
*
- * @build CheckDefaultGroupName
- *
+ * @library ../../../testlibrary
+ * @build TestLibrary
* @run main CheckDefaultGroupName
- * @author Laird Dornin
*/
import java.rmi.activation.*;
--- a/jdk/test/java/rmi/activation/ActivationSystem/activeGroup/IdempotentActiveGroup.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivationSystem/activeGroup/IdempotentActiveGroup.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,15 +23,13 @@
/* @test
* @bug 4720528
- *
* @summary synopsis: (spec) ActivationSystem.activeGroup spec should be
* relaxed (duplicate call to activeGroup with same instantiator and
* incarnation should not throw ActivationException; it should succeed)
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID
- * @build IdempotentActiveGroup
+ * @build TestLibrary RMID ActivationLibrary
* @run main/othervm/policy=security.policy/timeout=480 IdempotentActiveGroup
*/
--- a/jdk/test/java/rmi/activation/ActivationSystem/modifyDescriptor/ModifyDescriptor.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivationSystem/modifyDescriptor/ModifyDescriptor.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,9 +29,8 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build ActivateMe
- * @build ModifyDescriptor
- * @build ModifyDescriptor_Stub
+ * @build TestLibrary RMID ActivationLibrary
+ * ActivateMe ModifyDescriptor_Stub
* @run main/othervm/policy=security.policy/timeout=240 ModifyDescriptor
*/
--- a/jdk/test/java/rmi/activation/ActivationSystem/stubClassesPermitted/StubClassesPermitted.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivationSystem/stubClassesPermitted/StubClassesPermitted.java Mon Dec 17 08:30:06 2012 -0500
@@ -24,14 +24,11 @@
/* @test
* @bug 4179055
* @summary Some java apps need to have access to read "accessClassInPackage.sun.rmi.server"
- *
* @author Laird Dornin
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe ActivationLibrary
- * @build CanCreateStubs
- * @build StubClassesPermitted
- * @build StubClassesPermitted_Stub
+ * @build TestLibrary RMID ActivationLibrary
+ * CanCreateStubs StubClassesPermitted_Stub
* @run main/othervm/policy=security.policy/secure=java.lang.SecurityManager/timeout=240 StubClassesPermitted
*/
--- a/jdk/test/java/rmi/activation/ActivationSystem/unregisterGroup/UnregisterGroup.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/ActivationSystem/unregisterGroup/UnregisterGroup.java Mon Dec 17 08:30:06 2012 -0500
@@ -24,16 +24,12 @@
/* @test
* @bug 4134233
* @bug 4213186
- *
* @summary synopsis: ActivationSystem.unregisterGroup should unregister objects in group
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build ActivateMe CallbackInterface
- * @build UnregisterGroup
- * @build UnregisterGroup_Stub
- * @build Callback_Stub
+ * @build TestLibrary RMID ActivationLibrary
+ * ActivateMe CallbackInterface UnregisterGroup_Stub Callback_Stub
* @run main/othervm/policy=security.policy/timeout=480 UnregisterGroup
*/
--- a/jdk/test/java/rmi/activation/CommandEnvironment/NullOptions.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/CommandEnvironment/NullOptions.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,6 @@
* ActivationGroupDesc.CommandEnvironment
* @author Ann Wollrath
*
- * @build NullOptions
* @run main/othervm/timeout=240 NullOptions
*/
--- a/jdk/test/java/rmi/activation/CommandEnvironment/SetChildEnv.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/CommandEnvironment/SetChildEnv.java Mon Dec 17 08:30:06 2012 -0500
@@ -35,9 +35,10 @@
* @author Adrian Colley
*
* @library ../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build Eliza Retireable Doctor Doctor_Stub SetChildEnv
- * @run main/othervm/timeout=240/policy=security.policy -Djava.compiler=NONE SetChildEnv
+ * @build TestLibrary RMID ActivationLibrary
+ * Eliza Retireable Doctor Doctor_Stub
+ * @run main/othervm/timeout=240/policy=security.policy
+ * -Djava.compiler=NONE SetChildEnv
*/
import java.rmi.*;
import java.util.Properties;
--- a/jdk/test/java/rmi/activation/checkusage/CheckUsage.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/checkusage/CheckUsage.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,7 +25,7 @@
* @bug 4259564
*
* @library ../../testlibrary
- * @build TestLibrary JavaVM CheckUsage
+ * @build TestLibrary JavaVM
* @run main/othervm CheckUsage
*/
--- a/jdk/test/java/rmi/activation/log/LogTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/log/LogTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,7 +29,6 @@
* boundaries
* @author Ann Wollrath
*
- * @build LogTest
* @run main/othervm/timeout=240 LogTest
*/
--- a/jdk/test/java/rmi/activation/rmidViaInheritedChannel/InheritedChannelNotServerSocket.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/rmidViaInheritedChannel/InheritedChannelNotServerSocket.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,8 +29,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build RMID ActivationLibrary
- * @build InheritedChannelNotServerSocket
+ * @build TestLibrary RMID ActivationLibrary
* @run main/othervm/timeout=240 InheritedChannelNotServerSocket
*/
--- a/jdk/test/java/rmi/activation/rmidViaInheritedChannel/RmidViaInheritedChannel.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/activation/rmidViaInheritedChannel/RmidViaInheritedChannel.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,9 +27,7 @@
* @author Ann Wollrath
*
* @library ../../testlibrary
- * @build RMID ActivationLibrary
- * @build RmidViaInheritedChannel
- * @build TestLibrary
+ * @build TestLibrary RMID ActivationLibrary
* @run main/othervm/timeout=240 RmidViaInheritedChannel
*/
--- a/jdk/test/java/rmi/dgc/VMID/CheckVMID.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/dgc/VMID/CheckVMID.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,7 @@
* @author Ann Wollrath
*
* @library ../../testlibrary
- * @build CheckVMID
+ * @build TestLibrary
* @run main/othervm/policy=security.policy CheckVMID
*/
--- a/jdk/test/java/rmi/dgc/dgcAckFailure/DGCAckFailure.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/dgc/dgcAckFailure/DGCAckFailure.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,8 +30,7 @@
* rather than pinning it indefinitely.
* @author Peter Jones
*
- * @build DGCAckFailure
- * @build DGCAckFailure_Stub
+ * @build DGCAckFailure DGCAckFailure_Stub
* @run main/othervm DGCAckFailure
*/
--- a/jdk/test/java/rmi/dgc/dgcImplInsulation/DGCImplInsulation.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/dgc/dgcImplInsulation/DGCImplInsulation.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,9 +31,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build TestLibrary
- * @build DGCImplInsulation
- * @build DGCImplInsulation_Stub
+ * @build TestLibrary DGCImplInsulation_Stub
* @run main/othervm/policy=security.policy DGCImplInsulation
*/
--- a/jdk/test/java/rmi/dgc/retryDirtyCalls/RetryDirtyCalls.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/dgc/retryDirtyCalls/RetryDirtyCalls.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,8 +29,7 @@
* renewing that lease at all after the first failure.
* @author Peter Jones (inspired by Adrian Colley's test case in 4268258)
*
- * @build RetryDirtyCalls
- * @build RetryDirtyCalls_Stub
+ * @build RetryDirtyCalls RetryDirtyCalls_Stub
* @run main/othervm RetryDirtyCalls
*/
--- a/jdk/test/java/rmi/invalidName/InvalidName.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/invalidName/InvalidName.java Mon Dec 17 08:30:06 2012 -0500
@@ -32,7 +32,7 @@
* @author Laird Dornin
*
* @library ../testlibrary
- * @build InvalidName
+ * @build TestLibrary
* @run main/othervm InvalidName
*/
--- a/jdk/test/java/rmi/registry/altSecurityManager/AltSecurityManager.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/altSecurityManager/AltSecurityManager.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,8 +27,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build StreamPipe TestParams TestLibrary JavaVM RMID
- * @build AltSecurityManager TestSecurityManager
+ * @build TestLibrary JavaVM RMID TestSecurityManager
* @run main/othervm AltSecurityManager
*/
--- a/jdk/test/java/rmi/registry/checkusage/CheckUsage.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/checkusage/CheckUsage.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build TestLibrary JavaVM CheckUsage
+ * @build TestLibrary JavaVM
* @run main/othervm CheckUsage
*/
--- a/jdk/test/java/rmi/registry/classPathCodebase/ClassPathCodebase.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/classPathCodebase/ClassPathCodebase.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build ClassPathCodebase Dummy TestLibrary
+ * @build TestLibrary Dummy
* @run main/othervm/policy=security.policy ClassPathCodebase
*/
--- a/jdk/test/java/rmi/registry/interfaceHash/InterfaceHash.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/interfaceHash/InterfaceHash.java Mon Dec 17 08:30:06 2012 -0500
@@ -36,9 +36,7 @@
*
* @author Peter Jones
* @library ../../testlibrary
- * @build InterfaceHash
- * @build ReferenceRegistryStub
- * @build TestLibrary
+ * @build TestLibrary ReferenceRegistryStub
* @run main/othervm InterfaceHash
*/
--- a/jdk/test/java/rmi/registry/multipleRegistries/MultipleRegistries.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/multipleRegistries/MultipleRegistries.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,6 @@
*
* @library ../../testlibrary
* @build TestLibrary
- * @build MultipleRegistries
* @run main/othervm/timeout=240 MultipleRegistries
*/
--- a/jdk/test/java/rmi/registry/readTest/readTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/readTest/readTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -76,7 +76,7 @@
;;
esac
# trailing / after code base is important for rmi codebase property.
-${TESTJAVA}${FS}bin${FS}java -cp $TEST_CLASSPATH -Djava.rmi.server.codebase=${FILEURL}$CODEBASE/ readTest > OUT.TXT 2>&1 &
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp $TEST_CLASSPATH -Djava.rmi.server.codebase=${FILEURL}$CODEBASE/ readTest > OUT.TXT 2>&1 &
TEST_PID=$!
#bulk of testcase - let it run for a while
sleep 5
--- a/jdk/test/java/rmi/registry/reexport/Reexport.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/registry/reexport/Reexport.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,9 +25,7 @@
* @bug 4120329
* @summary RMI registry creation is impossible if first attempt fails.
* @library ../../testlibrary
- * @build StreamPipe TestParams TestLibrary JavaVM
- * @build RegistryRunner RegistryRunner_Stub
- * @build Reexport
+ * @build TestLibrary JavaVM RegistryRunner RegistryRunner_Stub
* @run main/othervm Reexport
*/
--- a/jdk/test/java/rmi/reliability/benchmark/runRmiBench.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/reliability/benchmark/runRmiBench.sh Mon Dec 17 08:30:06 2012 -0500
@@ -27,20 +27,20 @@
# used to run the test under JTREG.
#
# @build bench.BenchInfo bench.HtmlReporter bench.Util bench.Benchmark
-# @build bench.Reporter bench.XmlReporter bench.ConfigFormatException
-# @build bench.Harness bench.TextReporter bench.rmi.BenchServer
-# @build bench.rmi.DoubleArrayCalls bench.rmi.LongCalls bench.rmi.ShortCalls
-# @build bench.rmi.BenchServerImpl bench.rmi.DoubleCalls
-# @build bench.rmi.Main bench.rmi.SmallObjTreeCalls
-# @build bench.rmi.BooleanArrayCalls bench.rmi.ExceptionCalls
-# @build bench.rmi.NullCalls bench.rmi.BooleanCalls bench.rmi.ExportObjs
-# @build bench.rmi.ObjArrayCalls bench.rmi.ByteArrayCalls
-# @build bench.rmi.FloatArrayCalls bench.rmi.ObjTreeCalls
-# @build bench.rmi.ByteCalls bench.rmi.FloatCalls bench.rmi.ProxyArrayCalls
-# @build bench.rmi.CharArrayCalls bench.rmi.IntArrayCalls
-# @build bench.rmi.RemoteObjArrayCalls bench.rmi.CharCalls bench.rmi.IntCalls
-# @build bench.rmi.ClassLoading bench.rmi.LongArrayCalls
-# @build bench.rmi.ShortArrayCalls bench.rmi.altroot.Node
+# bench.Reporter bench.XmlReporter bench.ConfigFormatException
+# bench.Harness bench.TextReporter bench.rmi.BenchServer
+# bench.rmi.DoubleArrayCalls bench.rmi.LongCalls bench.rmi.ShortCalls
+# bench.rmi.BenchServerImpl bench.rmi.DoubleCalls
+# bench.rmi.Main bench.rmi.SmallObjTreeCalls
+# bench.rmi.BooleanArrayCalls bench.rmi.ExceptionCalls
+# bench.rmi.NullCalls bench.rmi.BooleanCalls bench.rmi.ExportObjs
+# bench.rmi.ObjArrayCalls bench.rmi.ByteArrayCalls
+# bench.rmi.FloatArrayCalls bench.rmi.ObjTreeCalls
+# bench.rmi.ByteCalls bench.rmi.FloatCalls bench.rmi.ProxyArrayCalls
+# bench.rmi.CharArrayCalls bench.rmi.IntArrayCalls
+# bench.rmi.RemoteObjArrayCalls bench.rmi.CharCalls bench.rmi.IntCalls
+# bench.rmi.ClassLoading bench.rmi.LongArrayCalls
+# bench.rmi.ShortArrayCalls bench.rmi.altroot.Node
#
# @run shell/timeout=1800 runRmiBench.sh
#
--- a/jdk/test/java/rmi/reliability/benchmark/runSerialBench.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/reliability/benchmark/runSerialBench.sh Mon Dec 17 08:30:06 2012 -0500
@@ -51,7 +51,7 @@
echo "Starting serialization benchmark "
$TESTJAVA/bin/java \
- -server \
+ ${TESTVMOPTS} \
-cp $TESTCLASSES \
bench.serial.Main \
-c $TESTSRC/bench/serial/jtreg-config &
--- a/jdk/test/java/rmi/reliability/juicer/AppleUserImpl.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/reliability/juicer/AppleUserImpl.java Mon Dec 17 08:30:06 2012 -0500
@@ -55,11 +55,10 @@
* has been reached.
*
* @library ../../testlibrary
- *
- * @build Apple AppleEvent AppleImpl AppleUserImpl
- * @build Orange OrangeEcho OrangeEchoImpl OrangeImpl
- * @build ApplicationServer
* @build TestLibrary
+ * Apple AppleEvent AppleImpl
+ * Orange OrangeEcho OrangeEchoImpl OrangeImpl
+ * ApplicationServer
*
* @run main/othervm/policy=security.policy AppleUserImpl -seconds 30
*
--- a/jdk/test/java/rmi/server/ObjID/randomIDs/RandomIDs.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/ObjID/randomIDs/RandomIDs.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,6 @@
* ObjID() should still generate sequential object numbers.
* @author Peter Jones
*
- * @build RandomIDs
* @run main/othervm RandomIDs random
* @run main/othervm -Djava.rmi.server.randomIDs=true RandomIDs random
* @run main/othervm -Djava.rmi.server.randomIDs=false RandomIDs sequential
--- a/jdk/test/java/rmi/server/RMIClassLoader/delegateBeforePermissionCheck/DelegateBeforePermissionCheck.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/delegateBeforePermissionCheck/DelegateBeforePermissionCheck.java Mon Dec 17 08:30:06 2012 -0500
@@ -33,9 +33,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build DelegateBeforePermissionCheck
- * @build Foo
+ * @build TestLibrary Foo
* @run main/othervm DelegateBeforePermissionCheck
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/delegateToContextLoader/DelegateToContextLoader.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/delegateToContextLoader/DelegateToContextLoader.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,7 +29,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build DelegateToContextLoader Dummy
+ * @build TestLibrary Dummy
* @run main/othervm/policy=security.policy/timeout=120 DelegateToContextLoader
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/downloadArrayClass/DownloadArrayClass.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/downloadArrayClass/DownloadArrayClass.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,11 +31,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build Receiver
- * @build DownloadArrayClass
- * @build DownloadArrayClass_Stub
- * @build Foo
+ * @build TestLibrary Receiver DownloadArrayClass_Stub Foo
* @run main/othervm/policy=security.policy DownloadArrayClass
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/getClassAnnotation/NullClass.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/getClassAnnotation/NullClass.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,7 +29,6 @@
*
* @library ../../../testlibrary
* @build TestLibrary
- * @build NullClass
* @run main/othervm NullClass
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/getClassLoader/GetClassLoader.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/getClassLoader/GetClassLoader.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,9 +29,7 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build GetClassLoader
- * @build Foo
+ * @build TestLibrary Foo
* @run main/othervm/policy=security.policy GetClassLoader
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/loadProxyClasses/LoadProxyClasses.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/loadProxyClasses/LoadProxyClasses.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,8 +30,8 @@
* @author Laird Dornin
*
* @library ../../../testlibrary
- * @build TestLibrary FnnClass FnnUnmarshal LoadProxyClasses NonpublicInterface
- * @build NonpublicInterface1 PublicInterface PublicInterface1
+ * @build TestLibrary FnnClass FnnUnmarshal NonpublicInterface
+ * NonpublicInterface1 PublicInterface PublicInterface1
* @run main/othervm/policy=security.policy LoadProxyClasses
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/noSecurityManager/NoSecurityManager.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/noSecurityManager/NoSecurityManager.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,7 @@
* been used for the RMI class loader instance.
* @author Peter Jones
*
- * @build NoSecurityManager Dummy LocalDummy
+ * @build Dummy LocalDummy
* @run main/othervm/timeout=120 NoSecurityManager
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/spi/ContextInsulation.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/spi/ContextInsulation.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,11 +30,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build ContextInsulation
- * @build ServiceConfiguration
- * @build TestProvider
- * @build TestProvider2
+ * @build TestLibrary ServiceConfiguration TestProvider TestProvider2
* @run main/othervm/policy=security.policy ContextInsulation
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/spi/DefaultProperty.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/spi/DefaultProperty.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,10 +29,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build DefaultProperty
- * @build ServiceConfiguration
- * @build Foo
+ * @build TestLibrary ServiceConfiguration Foo
* @run main/othervm/policy=security.policy DefaultProperty
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/spi/Installed.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/spi/Installed.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,11 +29,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build Installed
- * @build ServiceConfiguration
- * @build TestProvider
- * @build TestProvider2
+ * @build TestLibrary ServiceConfiguration TestProvider TestProvider2
* @run main/othervm/policy=security.policy Installed
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/spi/InvalidProperty.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/spi/InvalidProperty.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,9 +29,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build InvalidProperty
- * @build ServiceConfiguration
+ * @build TestLibrary ServiceConfiguration
* @run main/othervm/policy=security.policy InvalidProperty
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/spi/Property.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/spi/Property.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,10 +29,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build Property
- * @build ServiceConfiguration
- * @build TestProvider
+ * @build TestLibrary ServiceConfiguration TestProvider
* @run main/othervm/policy=security.policy Property
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/useCodebaseOnly/UseCodebaseOnly.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/useCodebaseOnly/UseCodebaseOnly.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,12 +31,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build Receiver
- * @build UseCodebaseOnly
- * @build UseCodebaseOnly_Stub
- * @build Foo
- * @build Bar
+ * @build TestLibrary Receiver UseCodebaseOnly_Stub Foo Bar
* @run main/othervm/policy=security.policy UseCodebaseOnly
*/
--- a/jdk/test/java/rmi/server/RMIClassLoader/useGetURLs/UseGetURLs.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMIClassLoader/useGetURLs/UseGetURLs.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,8 +30,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build UseGetURLs Dummy
+ * @build TestLibrary Dummy
* @run main/othervm/policy=security.policy/timeout=120 UseGetURLs
*/
--- a/jdk/test/java/rmi/server/RMISocketFactory/useSocketFactory/activatable/UseCustomSocketFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMISocketFactory/useSocketFactory/activatable/UseCustomSocketFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,11 +28,7 @@
* @author Ann Wollrath
*
* @library ../../../../testlibrary
- * @build Echo
- * @build EchoImpl
- * @build EchoImpl_Stub
- * @build UseCustomSocketFactory
- * @build TestLibrary
+ * @build TestLibrary Echo EchoImpl EchoImpl_Stub
* @run main/othervm/policy=security.policy/timeout=360 UseCustomSocketFactory
*/
--- a/jdk/test/java/rmi/server/RMISocketFactory/useSocketFactory/registry/UseCustomSocketFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMISocketFactory/useSocketFactory/registry/UseCustomSocketFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,12 +29,7 @@
* @author Laird Dornin; code borrowed from Ann Wollrath
*
* @library ../../../../testlibrary
- * @build Hello
- * @build HelloImpl
- * @build HelloImpl_Stub
- * @build TestLibrary
- * @build UseCustomSocketFactory
- * @build Compress
+ * @build TestLibrary Compress Hello HelloImpl HelloImpl_Stub
* @run main/othervm/policy=security.policy/timeout=240 UseCustomSocketFactory
*/
--- a/jdk/test/java/rmi/server/RMISocketFactory/useSocketFactory/unicast/UseCustomSocketFactory.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RMISocketFactory/useSocketFactory/unicast/UseCustomSocketFactory.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,11 +29,7 @@
* @author Ann Wollrath
*
* @library ../../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build Echo
- * @build EchoImpl
- * @build EchoImpl_Stub
- * @build UseCustomSocketFactory
+ * @build TestLibrary RMID JavaVM Echo EchoImpl EchoImpl_Stub
* @run main/othervm/policy=security.policy/timeout=120 UseCustomSocketFactory
*/
--- a/jdk/test/java/rmi/server/RemoteObject/notExtending/NotExtending.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RemoteObject/notExtending/NotExtending.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,9 +29,7 @@
* (specifically: stubs) that contain the instance's RemoteRef.
* @author Peter Jones
*
- * @build NotExtending
- * @build NotExtending_Stub
- * @build NotExtending_Skel
+ * @build NotExtending_Stub NotExtending_Skel
* @run main/othervm/timeout=240 NotExtending
*/
--- a/jdk/test/java/rmi/server/RemoteObject/verifyRemoteEquals/VerifyRemoteEquals.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RemoteObject/verifyRemoteEquals/VerifyRemoteEquals.java Mon Dec 17 08:30:06 2012 -0500
@@ -21,16 +21,16 @@
* questions.
*/
-/**
+/*
* @test
* @bug 4251010
* @summary equals does not work on stub objects created with a
* custom socket factory
- * @library ../../../testlibrary
+ * @author Laird Dornin
*
- * @build VerifyRemoteEquals
+ * @library ../../../testlibrary
+ * @build TestLibrary
* @run main/othervm/timeout=40 VerifyRemoteEquals
- * @author Laird Dornin
*/
import java.io.*;
--- a/jdk/test/java/rmi/server/RemoteServer/AddrInUse.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/RemoteServer/AddrInUse.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,8 +26,6 @@
* @summary retryServerSocket should not retry on BindException
* @author Ann Wollrath
*
- * @library ../../testlibrary
- * @build AddrInUse
* @run main/othervm AddrInUse
*/
--- a/jdk/test/java/rmi/server/UnicastRemoteObject/changeHostName/ChangeHostName.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/UnicastRemoteObject/changeHostName/ChangeHostName.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,8 +31,7 @@
*
* @author Ann Wollrath
*
- * @build ChangeHostName
- * @build ChangeHostName_Stub
+ * @build ChangeHostName ChangeHostName_Stub
* @run main/othervm ChangeHostName
*/
--- a/jdk/test/java/rmi/server/UnicastRemoteObject/exportObject/GcDuringExport.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/UnicastRemoteObject/exportObject/GcDuringExport.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,6 +30,7 @@
* @bug 6597112
* @summary GC'ing objects whilst being exported to RMI should not cause exceptions
* @author Neil Richards <neil.richards@ngmr.net>, <neil_richards@uk.ibm.com>
+ * @run main GcDuringExport
*/
import java.rmi.Remote;
--- a/jdk/test/java/rmi/server/UnicastRemoteObject/keepAliveDuringCall/KeepAliveDuringCall.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/UnicastRemoteObject/keepAliveDuringCall/KeepAliveDuringCall.java Mon Dec 17 08:30:06 2012 -0500
@@ -32,14 +32,8 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build JavaVM
- * @build KeepAliveDuringCall
- * @build KeepAliveDuringCall_Stub
- * @build ShutdownMonitor
- * @build Shutdown
- * @build ShutdownImpl
- * @build ShutdownImpl_Stub
+ * @build TestLibrary JavaVM KeepAliveDuringCall_Stub
+ * ShutdownMonitor Shutdown ShutdownImpl ShutdownImpl_Stub
* @run main/othervm KeepAliveDuringCall
*/
--- a/jdk/test/java/rmi/server/UnicastRemoteObject/marshalAfterUnexport/MarshalAfterUnexport.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/UnicastRemoteObject/marshalAfterUnexport/MarshalAfterUnexport.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,8 +31,7 @@
* IOException (see fix for bugid 4017232).
* @author Peter Jones
*
- * @build MarshalAfterUnexport
- * @build MarshalAfterUnexport_Stub
+ * @build MarshalAfterUnexport MarshalAfterUnexport_Stub
* @run main/othervm MarshalAfterUnexport
*/
--- a/jdk/test/java/rmi/server/UnicastRemoteObject/marshalAfterUnexport/MarshalAfterUnexport2.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/UnicastRemoteObject/marshalAfterUnexport/MarshalAfterUnexport2.java Mon Dec 17 08:30:06 2012 -0500
@@ -33,8 +33,7 @@
* @author Peter Jones
* @author Ann Wollrath
*
- * @build MarshalAfterUnexport2
- * @build MarshalAfterUnexport2_Stub
+ * @build MarshalAfterUnexport2 MarshalAfterUnexport2_Stub
* @run main/othervm MarshalAfterUnexport2
*/
--- a/jdk/test/java/rmi/server/UnicastRemoteObject/unexportObject/UnexportLeak.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/UnicastRemoteObject/unexportObject/UnexportLeak.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,10 +29,7 @@
* @author Ann Wollrath
*
* @library ../../../testlibrary
- * @build UnexportLeak
- * @build UnexportLeak_Stub
- * @build TestLibrary
- * @build Ping
+ * @build TestLibrary UnexportLeak_Stub Ping
* @run main/othervm UnexportLeak
*/
--- a/jdk/test/java/rmi/server/Unmarshal/PrimitiveClasses.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/Unmarshal/PrimitiveClasses.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,7 @@
* @bug 4442373
* @summary Verify that RMI can successfully unmarshal Class objects for
* primitive types.
+ * @run main PrimitiveClasses
*/
import java.rmi.MarshalledObject;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/rmi/server/Unmarshal/checkUnmarshalOnStopThread/CheckUnmarshal.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ *
+ */
+import java.rmi.Remote;
+import java.rmi.RemoteException;
+ /*
+ * Interface with methods to exercise RMI parameter marshalling
+ * and unmarshalling.
+ */
+ interface CheckUnmarshal extends java.rmi.Remote {
+ public PoisonPill getPoisonPill() throws RemoteException;
+ public Object ping() throws RemoteException;
+ public void passRuntimeExceptionParameter(
+ RuntimeExceptionParameter rep)
+ throws RemoteException;
+ }
--- a/jdk/test/java/rmi/server/Unmarshal/checkUnmarshalOnStopThread/CheckUnmarshalOnStopThread.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/Unmarshal/checkUnmarshalOnStopThread/CheckUnmarshalOnStopThread.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,10 +31,8 @@
* @author Laird Dornin
*
* @library ../../../testlibrary
- * @build TestLibrary RMID JavaVM StreamPipe
- * @build CheckUnmarshall PoisonPill RuntimeExceptionParameter
- * @build CheckUnmarshalOnStopThread
- * @build CheckUnmarshalOnStopThread_Stub
+ * @build TestLibrary CheckUnmarshal CheckUnmarshalOnStopThread_Stub
+ * PoisonPill RuntimeExceptionParameter
* @run main/othervm/timeout=480 CheckUnmarshalOnStopThread
*/
--- a/jdk/test/java/rmi/server/Unmarshal/checkUnmarshalOnStopThread/CheckUnmarshall.java Mon Dec 17 08:28:27 2012 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- *
- */
-import java.rmi.Remote;
-import java.rmi.RemoteException;
- /*
- * Interface with methods to exercise RMI parameter marshalling
- * and unmarshalling.
- */
- interface CheckUnmarshal extends java.rmi.Remote {
- public PoisonPill getPoisonPill() throws RemoteException;
- public Object ping() throws RemoteException;
- public void passRuntimeExceptionParameter(
- RuntimeExceptionParameter rep)
- throws RemoteException;
- }
--- a/jdk/test/java/rmi/server/Unreferenced/finiteGCLatency/FiniteGCLatency.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/Unreferenced/finiteGCLatency/FiniteGCLatency.java Mon Dec 17 08:30:06 2012 -0500
@@ -35,9 +35,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build FiniteGCLatency
- * @build FiniteGCLatency_Stub
- * @build TestLibrary
+ * @build TestLibrary FiniteGCLatency_Stub
* @run main/othervm/timeout=120 FiniteGCLatency
*/
--- a/jdk/test/java/rmi/server/Unreferenced/leaseCheckInterval/LeaseCheckInterval.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/Unreferenced/leaseCheckInterval/LeaseCheckInterval.java Mon Dec 17 08:30:06 2012 -0500
@@ -37,11 +37,7 @@
* @author Peter Jones
*
* @library ../../../testlibrary
- * @build TestLibrary
- * @build JavaVM
- * @build LeaseCheckInterval
- * @build LeaseCheckInterval_Stub
- * @build SelfTerminator
+ * @build TestLibrary JavaVM LeaseCheckInterval_Stub SelfTerminator
* @run main/othervm LeaseCheckInterval
*/
--- a/jdk/test/java/rmi/server/Unreferenced/marshalledObjectGet/MarshalledObjectGet.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/Unreferenced/marshalledObjectGet/MarshalledObjectGet.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,8 +30,6 @@
* invoked.
* @author Peter Jones
*
- * @library ../../../testlibrary
- * @build MarshalledObjectGet
* @build MarshalledObjectGet_Stub
* @run main/othervm/timeout=120 MarshalledObjectGet
*/
--- a/jdk/test/java/rmi/server/Unreferenced/unreferencedContext/UnreferencedContext.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/Unreferenced/unreferencedContext/UnreferencedContext.java Mon Dec 17 08:30:06 2012 -0500
@@ -39,9 +39,7 @@
* @author Laird Dornin
*
* @library ../../../testlibrary
- * @build UnreferencedContext
- * @build UnreferencedContext_Stub
- * @build TestLibrary
+ * @build TestLibrary UnreferencedContext_Stub
* @run main/othervm/timeout=120 UnreferencedContext
*/
--- a/jdk/test/java/rmi/server/clientStackTrace/ClientStackTrace.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/clientStackTrace/ClientStackTrace.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build ClientStackTrace MyRemoteObject_Stub TestLibrary TestParams
+ * @build TestLibrary ClientStackTrace MyRemoteObject_Stub
* @run main/othervm/policy=security.policy/timeout=120 ClientStackTrace
*/
--- a/jdk/test/java/rmi/server/getRemoteClass/GetRemoteClass.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/getRemoteClass/GetRemoteClass.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build GetRemoteClass TestLibrary TestParams
+ * @build TestLibrary
* @run main/othervm GetRemoteClass
*/
--- a/jdk/test/java/rmi/server/serverStackTrace/ServerStackTrace.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/serverStackTrace/ServerStackTrace.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,8 +29,7 @@
* serialized with the Throwable from the server.
* @author Peter Jones
*
- * @build ServerStackTrace
- * @build ServerStackTrace_Stub
+ * @build ServerStackTrace ServerStackTrace_Stub
* @run main/othervm ServerStackTrace
*/
--- a/jdk/test/java/rmi/server/serverStackTrace/SuppressStackTraces.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/serverStackTrace/SuppressStackTraces.java Mon Dec 17 08:30:06 2012 -0500
@@ -36,10 +36,7 @@
* for reasons of performance or confidentiality requirements.
* @author Peter Jones
*
- * @build SuppressStackTraces
- * @build Impl2_Stub
- * @build Impl1_Stub
- * @build Impl1_Skel
+ * @build SuppressStackTraces Impl2_Stub Impl1_Stub Impl1_Skel
* @run main/othervm SuppressStackTraces
*/
--- a/jdk/test/java/rmi/server/useCustomRef/UseCustomRef.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/server/useCustomRef/UseCustomRef.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,11 +31,7 @@
* 4180392
*
* @library ../../testlibrary
- * @build UseCustomRef
- * @build Ping
- * @build UseCustomRef_Stub
- * @build UseCustomRef_Skel
- * @build TestLibrary
+ * @build TestLibrary Ping UseCustomRef_Stub UseCustomRef_Skel
* @run main/othervm/policy=security.policy/secure=java.rmi.RMISecurityManager/timeout=120 UseCustomRef
*
* This test was failing to run because the synthetic access
--- a/jdk/test/java/rmi/testlibrary/TestLibrary.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/testlibrary/TestLibrary.java Mon Dec 17 08:30:06 2012 -0500
@@ -54,6 +54,7 @@
import java.rmi.server.UnicastRemoteObject;
import java.util.Enumeration;
import java.util.Properties;
+
import sun.rmi.registry.RegistryImpl;
import sun.rmi.server.UnicastServerRef;
import sun.rmi.transport.Endpoint;
@@ -92,6 +93,7 @@
public final static int INHERITEDCHANNELNOTSERVERSOCKET_ACTIVATION_PORT = 64003;
public final static int INHERITEDCHANNELNOTSERVERSOCKET_REGISTRY_PORT = 64004;
public final static int READTEST_REGISTRY_PORT = 64005;
+ private final static int MAX_SERVER_SOCKET_TRIES = 10;
static void mesg(Object mesg) {
System.err.println("TEST_LIBRARY: " + mesg.toString());
@@ -125,36 +127,15 @@
bomb(null, e);
}
- /**
- * Property accessors
- */
- private static boolean getBoolean(String name) {
- return (new Boolean(getProperty(name, "false")).booleanValue());
- }
- private static Integer getInteger(String name) {
- int val = 0;
- Integer value = null;
-
- String propVal = getProperty(name, null);
- if (propVal == null) {
- return null;
- }
-
- try {
- value = new Integer(Integer.parseInt(propVal));
- } catch (NumberFormatException nfe) {
- }
- return value;
- }
public static String getProperty(String property, String defaultVal) {
final String prop = property;
final String def = defaultVal;
- return ((String) java.security.AccessController.doPrivileged
- (new java.security.PrivilegedAction() {
- public Object run() {
+ return java.security.AccessController.doPrivileged(
+ new java.security.PrivilegedAction<String>() {
+ public String run() {
return System.getProperty(prop, def);
}
- }));
+ });
}
/**
@@ -169,9 +150,9 @@
public static void setProperty(String property, String value) {
final String prop = property;
final String val = value;
- java.security.AccessController.doPrivileged
- (new java.security.PrivilegedAction() {
- public Object run() {
+ java.security.AccessController.doPrivileged(
+ new java.security.PrivilegedAction<Void>() {
+ public Void run() {
System.setProperty(prop, val);
return null;
}
@@ -188,7 +169,7 @@
out.println("-------------------Test environment----------" +
"---------");
- for(Enumeration keys = System.getProperties().keys();
+ for(Enumeration<?> keys = System.getProperties().keys();
keys.hasMoreElements();) {
String property = (String) keys.nextElement();
@@ -252,7 +233,7 @@
/*
* Obtain the URL for the codebase.
*/
- URL codebaseURL = dstDir.toURL();
+ URL codebaseURL = dstDir.toURI().toURL();
/*
* Specify where we will copy the class definition from, if
@@ -407,26 +388,46 @@
*/
public static int getUnusedRandomPort() {
int numTries = 0;
- int unusedRandomPort = FIXED_PORT_MIN;
- Exception ex = null;
+ IOException ex = null;
- while (numTries++ < 10) {
+ while (numTries++ < MAX_SERVER_SOCKET_TRIES) {
+ int unusedRandomPort = -1;
ex = null; //reset
try (ServerSocket ss = new ServerSocket(0)) {
unusedRandomPort = ss.getLocalPort();
- } catch (Exception e) {
+ } catch (IOException e) {
ex = e;
+ // temporarily print stack trace here until we find out why
+ // tests are failing.
+ System.err.println("TestLibrary.getUnusedRandomPort() caught "
+ + "exception on iteration " + numTries
+ + (numTries==MAX_SERVER_SOCKET_TRIES ? " (the final try)."
+ : "."));
+ ex.printStackTrace();
}
- if (!isReservedPort(unusedRandomPort)) {
- return unusedRandomPort;
+ if (unusedRandomPort >= 0) {
+ if (isReservedPort(unusedRandomPort)) {
+ System.out.println("INFO: On try # " + numTries
+ + (numTries==MAX_SERVER_SOCKET_TRIES ? ", the final try, ": ",")
+ + " ServerSocket(0) returned the reserved port "
+ + unusedRandomPort
+ + " in TestLibrary.getUnusedRandomPort() ");
+ } else {
+ return unusedRandomPort;
+ }
}
}
// If we're here, then either an exception was thrown or the port is
// a reserved port.
- throw new RuntimeException("Error getting unused random port.", ex);
+ if (ex==null) {
+ throw new RuntimeException("Error getting unused random port. The"
+ +" last port returned by ServerSocket(0) was a reserved port");
+ } else {
+ throw new RuntimeException("Error getting unused random port.", ex);
+ }
}
/**
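
The reworked getUnusedRandomPort() above caps the number of ServerSocket(0) probes at MAX_SERVER_SOCKET_TRIES, skips the library's reserved fixed ports, and preserves the last IOException as the failure cause. A minimal standalone sketch of the same retry pattern, with a hypothetical reserved range standing in for TestLibrary.isReservedPort():

import java.io.IOException;
import java.net.ServerSocket;

public class RandomPortSketch {
    private static final int MAX_TRIES = 10;
    // Hypothetical reserved range standing in for TestLibrary.isReservedPort().
    private static final int RESERVED_MIN = 64001;
    private static final int RESERVED_MAX = 64005;

    static boolean isReserved(int port) {
        return port >= RESERVED_MIN && port <= RESERVED_MAX;
    }

    static int getUnusedRandomPort() {
        IOException lastFailure = null;
        for (int tries = 1; tries <= MAX_TRIES; tries++) {
            try (ServerSocket ss = new ServerSocket(0)) {
                int port = ss.getLocalPort();    // OS-assigned ephemeral port
                if (!isReserved(port)) {
                    return port;
                }
            } catch (IOException e) {
                lastFailure = e;                 // keep the last cause for the error
            }
        }
        throw new RuntimeException("Error getting unused random port.", lastFailure);
    }

    public static void main(String[] args) {
        System.out.println("picked port " + getUnusedRandomPort());
    }
}
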
--- a/jdk/test/java/rmi/transport/acceptLoop/CloseServerSocketOnTermination.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/acceptLoop/CloseServerSocketOnTermination.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,7 +31,6 @@
* exception for which it doesn't even consult the RMIFailureHandler.
* @author Peter Jones
*
- * @build CloseServerSocketOnTermination
* @run main/othervm CloseServerSocketOnTermination
*/
--- a/jdk/test/java/rmi/transport/checkFQDN/CheckFQDN.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/checkFQDN/CheckFQDN.java Mon Dec 17 08:30:06 2012 -0500
@@ -33,8 +33,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build CheckFQDN CheckFQDNClient CheckFQDN_Stub TellServerName
- * @build TestLibrary
+ * @build TestLibrary CheckFQDNClient CheckFQDN_Stub TellServerName
* @run main/othervm/timeout=120 CheckFQDN
*/
--- a/jdk/test/java/rmi/transport/checkLeaseInfoLeak/CheckLeaseLeak.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/checkLeaseInfoLeak/CheckLeaseLeak.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,8 +28,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build CheckLeaseLeak CheckLeaseLeak_Stub LeaseLeakClient LeaseLeak
- * @build TestLibrary
+ * @build TestLibrary CheckLeaseLeak_Stub LeaseLeakClient LeaseLeak
* @run main/othervm/timeout=240 CheckLeaseLeak
*
*/
--- a/jdk/test/java/rmi/transport/closeServerSocket/CloseServerSocket.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/closeServerSocket/CloseServerSocket.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,7 +31,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build CloseServerSocket TestLibrary
+ * @build TestLibrary
* @run main/othervm CloseServerSocket
*/
--- a/jdk/test/java/rmi/transport/dgcDeadLock/DGCDeadLock.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/dgcDeadLock/DGCDeadLock.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,11 +28,7 @@
* @author Laird Dornin
*
* @library ../../testlibrary
- * @build DGCDeadLock
- * @build Test
- * @build TestImpl
- * @build TestImpl_Stub
- * @build TestLibrary
+ * @build TestLibrary Test TestImpl TestImpl_Stub
* @run main/othervm/policy=security.policy/timeout=360 DGCDeadLock
*/
--- a/jdk/test/java/rmi/transport/handshakeFailure/HandshakeFailure.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/handshakeFailure/HandshakeFailure.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,7 +31,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build HandshakeFailure TestLibrary
+ * @build TestLibrary
* @run main/othervm HandshakeFailure
*/
--- a/jdk/test/java/rmi/transport/handshakeTimeout/HandshakeTimeout.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/handshakeTimeout/HandshakeTimeout.java Mon Dec 17 08:30:06 2012 -0500
@@ -34,7 +34,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build HandshakeTimeout TestLibrary
+ * @build TestLibrary
* @run main/othervm HandshakeTimeout
*/
--- a/jdk/test/java/rmi/transport/httpSocket/HttpSocketTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/httpSocket/HttpSocketTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,7 @@
* @author Dana Burns
*
* @library ../../testlibrary
- * @build HttpSocketTest HttpSocketTest_Stub TestLibrary
+ * @build TestLibrary HttpSocketTest HttpSocketTest_Stub
* @run main/othervm/policy=security.policy HttpSocketTest
*/
--- a/jdk/test/java/rmi/transport/rapidExportUnexport/RapidExportUnexport.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/rapidExportUnexport/RapidExportUnexport.java Mon Dec 17 08:30:06 2012 -0500
@@ -32,7 +32,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build TestLibrary RapidExportUnexport
+ * @build TestLibrary
* @run main/othervm RapidExportUnexport
*/
--- a/jdk/test/java/rmi/transport/readTimeout/ReadTimeoutTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/readTimeout/ReadTimeoutTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,12 +27,9 @@
* @summary Incoming connections should be subject to timeout
* @author Adrian Colley
*
- * @library ../../testlibrary
- * @build TestIface
- * @build TestImpl
- * @build TestImpl_Stub
- * @build ReadTimeoutTest
- * @run main/othervm/policy=security.policy/timeout=60 -Dsun.rmi.transport.tcp.readTimeout=5000 ReadTimeoutTest
+ * @build TestIface TestImpl TestImpl_Stub
+ * @run main/othervm/policy=security.policy/timeout=60
+ * -Dsun.rmi.transport.tcp.readTimeout=5000 ReadTimeoutTest
*/
/* This test sets a very short read timeout, exports an object, and then
--- a/jdk/test/java/rmi/transport/reuseDefaultPort/ReuseDefaultPort.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/reuseDefaultPort/ReuseDefaultPort.java Mon Dec 17 08:30:06 2012 -0500
@@ -34,7 +34,7 @@
* @author Peter Jones
*
* @library ../../testlibrary
- * @build ReuseDefaultPort TestLibrary
+ * @build TestLibrary
* @run main/othervm ReuseDefaultPort
*/
--- a/jdk/test/java/rmi/transport/runtimeThreadInheritanceLeak/RuntimeThreadInheritanceLeak.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/rmi/transport/runtimeThreadInheritanceLeak/RuntimeThreadInheritanceLeak.java Mon Dec 17 08:30:06 2012 -0500
@@ -38,7 +38,6 @@
* subsystems also not holding on to the loader in their daemon threads.]
* @author Peter Jones
*
- * @build RuntimeThreadInheritanceLeak
* @build RuntimeThreadInheritanceLeak_Stub
* @run main/othervm RuntimeThreadInheritanceLeak
*/
--- a/jdk/test/java/security/Security/ClassLoaderDeadlock/ClassLoaderDeadlock.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/security/Security/ClassLoaderDeadlock/ClassLoaderDeadlock.sh Mon Dec 17 08:30:06 2012 -0500
@@ -87,7 +87,7 @@
${TESTSRC}${FILESEP}provider${FILESEP}HashProvider.java
# run the test
-${TESTJAVA}${FILESEP}bin${FILESEP}java \
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} \
-classpath "${TESTCLASSES}${PATHSEP}${TESTSRC}${FILESEP}Deadlock.jar" \
-Djava.awt.headless=true \
ClassLoaderDeadlock
--- a/jdk/test/java/security/Security/ClassLoaderDeadlock/Deadlock.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/security/Security/ClassLoaderDeadlock/Deadlock.sh Mon Dec 17 08:30:06 2012 -0500
@@ -62,5 +62,5 @@
JAVA="${TESTJAVA}${FILESEP}bin${FILESEP}java"
-${JAVA} -cp "${TESTCLASSES}${PATHSEP}${TESTSRC}${FILESEP}Deadlock.jar" Deadlock
+${JAVA} ${TESTVMOPTS} -cp "${TESTCLASSES}${PATHSEP}${TESTSRC}${FILESEP}Deadlock.jar" Deadlock
--- a/jdk/test/java/security/Security/ClassLoaderDeadlock/Deadlock2.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/security/Security/ClassLoaderDeadlock/Deadlock2.sh Mon Dec 17 08:30:06 2012 -0500
@@ -100,8 +100,8 @@
rm Deadlock2*.class
# create serialized object and run the test
-${TESTJAVA}${FILESEP}bin${FILESEP}java CreateSerialized
-${TESTJAVA}${FILESEP}bin${FILESEP}java -Djava.ext.dirs=${TESTCLASSES}${FILESEP}testlib Deadlock2
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} CreateSerialized
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} -Djava.ext.dirs=${TESTCLASSES}${FILESEP}testlib Deadlock2
STATUS=$?
# clean up
--- a/jdk/test/java/security/Security/signedfirst/Dyn.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/security/Security/signedfirst/Dyn.sh Mon Dec 17 08:30:06 2012 -0500
@@ -83,7 +83,7 @@
${TESTSRC}${FILESEP}DynSignedProvFirst.java
# run the test
-${TESTJAVA}${FILESEP}bin${FILESEP}java \
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} \
-classpath "${TESTCLASSES}${PATHSEP}${TESTSRC}${FILESEP}exp.jar" \
DynSignedProvFirst
--- a/jdk/test/java/security/Security/signedfirst/Static.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/security/Security/signedfirst/Static.sh Mon Dec 17 08:30:06 2012 -0500
@@ -84,7 +84,7 @@
# run the test
cd ${TESTSRC}${FILESEP}
-${TESTJAVA}${FILESEP}bin${FILESEP}java \
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} \
-classpath "${TESTCLASSES}${PATHSEP}${TESTSRC}${FILESEP}exp.jar" \
-Djava.security.properties=file:${TESTSRC}${FILESEP}Static.props \
StaticSignedProvFirst
--- a/jdk/test/java/security/cert/CertificateFactory/slowstream.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/security/cert/CertificateFactory/slowstream.sh Mon Dec 17 08:30:06 2012 -0500
@@ -46,5 +46,5 @@
esac
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}SlowStream.java
-${TESTJAVA}${FS}bin${FS}java -Dtest.src=${TESTSRC} SlowStreamWriter | \
- ${TESTJAVA}${FS}bin${FS}java SlowStreamReader
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Dtest.src=${TESTSRC} SlowStreamWriter | \
+ ${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} SlowStreamReader
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Calendar/GenericTimeZoneNamesTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.*;
+import sun.util.locale.provider.TimeZoneNameUtility;
+
+public class GenericTimeZoneNamesTest {
+ private static final String[] PT = {
+ "America/Los_Angeles", "US/Pacific", "PST"
+ };
+
+ private static int errors = 0;
+
+ public static void main(String[] args) {
+ for (String tag : args) {
+ Locale locale = Locale.forLanguageTag(tag);
+ for (String tzid : PT) {
+ test(tzid, TimeZone.LONG, locale, "Pacific Time");
+ test(tzid, TimeZone.SHORT, locale, "PT");
+ }
+ }
+
+ if (errors != 0) {
+ throw new RuntimeException("test failed");
+ }
+ }
+
+ private static void test(String tzid, int style, Locale locale, String expected) {
+ // No public API to get generic time zone names (JDK 8)
+ String got = TimeZoneNameUtility.retrieveGenericDisplayName(tzid, style, locale);
+ if (!expected.equals(got)) {
+ System.err.printf("test: tzid=%s, locale=%s, style=%d, got=\"%s\", expected=\"%s\"%n",
+ tzid, locale, style, got, expected);
+ errors++;
+ }
+ }
+}
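
As the comment in test() notes, JDK 8 exposes no public accessor for generic (non-DST-specific) time zone names, so the test reaches into sun.util.locale.provider.TimeZoneNameUtility. For contrast, a hedged sketch of what the public TimeZone API does offer (output strings depend on locale data):

import java.util.Locale;
import java.util.TimeZone;

public class ZoneNameSketch {
    public static void main(String[] args) {
        TimeZone tz = TimeZone.getTimeZone("America/Los_Angeles");
        // Public API: standard and daylight names, but no "generic" style.
        System.out.println(tz.getDisplayName(false, TimeZone.LONG, Locale.US)); // e.g. "Pacific Standard Time"
        System.out.println(tz.getDisplayName(true, TimeZone.LONG, Locale.US));  // e.g. "Pacific Daylight Time"
        // The generic form ("Pacific Time") is only reachable through the
        // internal TimeZoneNameUtility used by the test above.
    }
}
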
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Calendar/GenericTimeZoneNamesTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,47 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# @test
+# @bug 8003267
+# @summary Unit test for generic time zone names support
+# @compile -XDignore.symbol.file GenericTimeZoneNamesTest.java
+# @run shell GenericTimeZoneNamesTest.sh
+
+# This test is locale data-dependent and assumes that both JRE and CLDR
+# have the same generic time zone names in English.
+
+STATUS=0
+echo "Locale providers: default"
+# TODO: The purpose of ja-JP is to make sure the fallback for generic
+# names works. Remove ja-JP when adding generic names to localized
+# resources.
+if ! ${TESTJAVA}/bin/java -esa -cp "${TESTCLASSES}" GenericTimeZoneNamesTest en-US ja-JP; then
+ STATUS=1
+fi
+
+echo "Locale providers: CLDR"
+if ! ${TESTJAVA}/bin/java -esa -cp "${TESTCLASSES}" -Djava.locale.providers=CLDR GenericTimeZoneNamesTest en-US; then
+ STATUS=1
+fi
+exit ${STATUS}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Calendar/NarrowNamesTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.*;
+import static java.util.GregorianCalendar.*;
+
+public class NarrowNamesTest {
+ private static final Locale US = Locale.US;
+ private static final Locale JAJPJP = new Locale("ja", "JP", "JP");
+ private static final Locale THTH = new Locale("th", "TH");
+
+ private static final String RESET_INDEX = "RESET_INDEX";
+
+ private static int errors = 0;
+
+ // This test is locale data-dependent.
+ public static void main(String[] args) {
+ test(US, ERA, "B",
+ ERA, BC, YEAR, 1);
+ test(US, ERA, "A",
+ ERA, AD, YEAR, 2012);
+ test(US, DAY_OF_WEEK, "S",
+ YEAR, 2012, MONTH, DECEMBER, DAY_OF_MONTH, 23);
+ test(US, AM_PM, "a",
+ HOUR_OF_DAY, 10);
+ test(US, AM_PM, "p",
+ HOUR_OF_DAY, 23);
+ test(JAJPJP, DAY_OF_WEEK, "\u65e5",
+ YEAR, 24, MONTH, DECEMBER, DAY_OF_MONTH, 23);
+ test(THTH, MONTH, NARROW_STANDALONE, "\u0e18.\u0e04.",
+ YEAR, 2555, MONTH, DECEMBER, DAY_OF_MONTH, 5);
+ test(THTH, DAY_OF_WEEK, "\u0e1e",
+ YEAR, 2555, MONTH, DECEMBER, DAY_OF_MONTH, 5);
+
+ testMap(US, DAY_OF_WEEK, ALL_STYLES, // shouldn't include any narrow names
+ "", // 1-based indexing for DAY_OF_WEEK
+ "Sunday", // Sunday
+ "Monday", // Monday
+ "Tuesday", // Tuesday
+ "Wednesday", // Wednesday
+ "Thursday", // Thursday
+ "Friday", // Friday
+ "Saturday", // Saturday
+ RESET_INDEX,
+ "", // 1-based indexing for DAY_OF_WEEK
+ "Sun", // abb Sunday
+ "Mon", // abb Monday
+ "Tue", // abb Tuesday
+ "Wed", // abb Wednesday
+ "Thu", // abb Thursday
+ "Fri", // abb Friday
+ "Sat" // abb Saturday
+ );
+ testMap(US, DAY_OF_WEEK, NARROW_FORMAT); // expect null
+ testMap(US, AM_PM, ALL_STYLES,
+ "AM", "PM",
+ RESET_INDEX,
+ "a", "p");
+ testMap(JAJPJP, DAY_OF_WEEK, NARROW_STANDALONE); // expect null
+ testMap(JAJPJP, DAY_OF_WEEK, NARROW_FORMAT,
+ "", // 1-based indexing for DAY_OF_WEEK
+ "\u65e5",
+ "\u6708",
+ "\u706b",
+ "\u6c34",
+ "\u6728",
+ "\u91d1",
+ "\u571f");
+ testMap(THTH, MONTH, NARROW_FORMAT); // expect null
+ testMap(THTH, MONTH, NARROW_STANDALONE,
+ "\u0e21.\u0e04.",
+ "\u0e01.\u0e1e.",
+ "\u0e21\u0e35.\u0e04.",
+ "\u0e40\u0e21.\u0e22.",
+ "\u0e1e.\u0e04.",
+ "\u0e21\u0e34.\u0e22.",
+ "\u0e01.\u0e04.",
+ "\u0e2a.\u0e04.",
+ "\u0e01.\u0e22.",
+ "\u0e15.\u0e04.",
+ "\u0e1e.\u0e22.",
+ "\u0e18.\u0e04.");
+
+ if (errors != 0) {
+ throw new RuntimeException("test failed");
+ }
+ }
+
+ private static void test(Locale locale, int field, String expected, int... data) {
+ test(locale, field, NARROW_FORMAT, expected, data);
+ }
+
+ private static void test(Locale locale, int field, int style, String expected, int... fieldValuePairs) {
+ Calendar cal = Calendar.getInstance(locale);
+ cal.clear();
+ for (int i = 0; i < fieldValuePairs.length;) {
+ int f = fieldValuePairs[i++];
+ int v = fieldValuePairs[i++];
+ cal.set(f, v);
+ }
+ String got = cal.getDisplayName(field, style, locale);
+ if (!expected.equals(got)) {
+ System.err.printf("test: locale=%s, field=%d, value=%d, style=%d, got=\"%s\", expected=\"%s\"%n",
+ locale, field, cal.get(field), style, got, expected);
+ errors++;
+ }
+ }
+
+ private static void testMap(Locale locale, int field, int style, String... expected) {
+ Map<String, Integer> expectedMap = null;
+ if (expected.length > 0) {
+ expectedMap = new TreeMap<>(LengthBasedComparator.INSTANCE);
+ int index = 0;
+ for (int i = 0; i < expected.length; i++) {
+ if (expected[i].isEmpty()) {
+ index++;
+ continue;
+ }
+ if (expected[i] == RESET_INDEX) {
+ index = 0;
+ continue;
+ }
+ expectedMap.put(expected[i], index++);
+ }
+ }
+ Calendar cal = Calendar.getInstance(locale);
+ Map<String, Integer> got = cal.getDisplayNames(field, style, locale);
+ if (!Objects.equals(expectedMap, got)) {
+ System.err.printf("testMap: locale=%s, field=%d, style=%d, expected=%s, got=%s%n",
+ locale, field, style, expectedMap, got);
+ errors++;
+ }
+ }
+
+ /**
+ * Comparator implementation for TreeMap which iterates keys from longest
+ * to shortest.
+ */
+ private static class LengthBasedComparator implements Comparator<String> {
+ private static final LengthBasedComparator INSTANCE = new LengthBasedComparator();
+
+ private LengthBasedComparator() {
+ }
+
+ @Override
+ public int compare(String o1, String o2) {
+ int n = o2.length() - o1.length();
+ return (n == 0) ? o1.compareTo(o2) : n;
+ }
+ }
+}
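
The LengthBasedComparator above sorts keys from longest to shortest and breaks ties lexicographically, so full names come before their abbreviated prefixes when the map is iterated. A short usage sketch (the class name and sample entries are made up for illustration):

import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;

public class LongestFirstSketch {
    public static void main(String[] args) {
        Comparator<String> longestFirst = (a, b) -> {
            int n = b.length() - a.length();        // longer strings sort first
            return (n == 0) ? a.compareTo(b) : n;   // ties fall back to natural order
        };

        Map<String, Integer> names = new TreeMap<>(longestFirst);
        names.put("Sun", 1);
        names.put("Sunday", 1);
        names.put("Sat", 7);
        names.put("Saturday", 7);

        // Prints Saturday and Sunday before the three-letter abbreviations.
        names.forEach((k, v) -> System.out.println(k + " -> " + v));
    }
}
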
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Calendar/NarrowNamesTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,41 @@
+#
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# @test
+# @bug 8000983
+# @summary Unit test for narrow names support
+# @build NarrowNamesTest
+# @run shell NarrowNamesTest.sh
+
+# This test is locale data-dependent and assumes that both JRE and CLDR
+# have the same narrow names.
+
+STATUS=0
+for P in "JRE,SPI" "CLDR"
+do
+ echo "Locale providers: $P"
+ if ! ${TESTJAVA}/bin/java -esa -cp "${TESTCLASSES}" -Djava.locale.providers="${P}" NarrowNamesTest; then
+ STATUS=1
+ fi
+done
+exit ${STATUS}
--- a/jdk/test/java/util/Collections/EmptyIterator.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/Collections/EmptyIterator.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,12 +23,12 @@
/*
* @test
- * @bug 5017904 6356890
+ * @bug 5017904 6356890 8004928
* @summary Test empty iterators, enumerations, and collections
*/
+import static java.util.Collections.*;
import java.util.*;
-import static java.util.Collections.*;
public class EmptyIterator {
@@ -45,10 +45,13 @@
testEmptyIterator(emptyTable.values().iterator());
testEmptyIterator(emptyTable.entrySet().iterator());
- testEmptyEnumeration(javax.swing.tree.DefaultMutableTreeNode
- .EMPTY_ENUMERATION);
- testEmptyEnumeration(javax.swing.text.SimpleAttributeSet
- .EMPTY.getAttributeNames());
+ final Enumeration<EmptyIterator> finalEmptyTyped =
+ Collections.emptyEnumeration();
+ testEmptyEnumeration(finalEmptyTyped);
+
+ final Enumeration finalEmptyAbstract =
+ Collections.emptyEnumeration();
+ testEmptyEnumeration(finalEmptyAbstract);
@SuppressWarnings("unchecked") Iterator<?> x =
new sun.tools.java.MethodSet()
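
The hunk above replaces the Swing-sourced empty enumerations with Collections.emptyEnumeration(), a java.util API (Java 7 and later) whose element type is inferred from the assignment target. A small self-contained illustration, with a hypothetical class name:

import java.util.Collections;
import java.util.Enumeration;

public class EmptyEnumerationSketch {
    public static void main(String[] args) {
        // The element type comes from the target of the assignment.
        Enumeration<String> names = Collections.emptyEnumeration();
        Enumeration<Integer> numbers = Collections.emptyEnumeration();

        // Both behave identically: there is nothing to enumerate.
        System.out.println(names.hasMoreElements());   // false
        System.out.println(numbers.hasMoreElements()); // false
    }
}
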
--- a/jdk/test/java/util/Currency/PropertiesTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/Currency/PropertiesTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -70,7 +70,7 @@
run() {
echo ''
- sh -xc "${TESTJAVA}${FS}bin${FS}java -cp ${TESTCLASSES} $*" 2>&1
+ sh -xc "${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp ${TESTCLASSES} $*" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
@@ -113,7 +113,7 @@
# run
echo ''
-sh -xc "${WRITABLEJDK}${FS}bin${FS}java -cp ${TESTCLASSES} PropertiesTest -d dump3"
+sh -xc "${WRITABLEJDK}${FS}bin${FS}java ${TESTVMOPTS} -cp ${TESTCLASSES} PropertiesTest -d dump3"
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
# Cleanup
--- a/jdk/test/java/util/Locale/LocaleCategory.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/Locale/LocaleCategory.sh Mon Dec 17 08:30:06 2012 -0500
@@ -69,7 +69,7 @@
# test user.xxx.display user.xxx.format properties
# run
-RUNCMD="${TESTJAVA}${FS}bin${FS}java -classpath ${TESTCLASSES} -Duser.language.display=ja -Duser.language.format=zh LocaleCategory"
+RUNCMD="${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath ${TESTCLASSES} -Duser.language.display=ja -Duser.language.format=zh LocaleCategory"
echo ${RUNCMD}
${RUNCMD}
@@ -85,7 +85,7 @@
# test user.xxx properties overriding user.xxx.display/format
# run
-RUNCMD="${TESTJAVA}${FS}bin${FS}java -classpath ${TESTCLASSES} -Duser.language=en -Duser.language.display=ja -Duser.language.format=zh LocaleCategory"
+RUNCMD="${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath ${TESTCLASSES} -Duser.language=en -Duser.language.display=ja -Duser.language.format=zh LocaleCategory"
echo ${RUNCMD}
${RUNCMD}
--- a/jdk/test/java/util/Locale/LocaleProviders.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/Locale/LocaleProviders.sh Mon Dec 17 08:30:06 2012 -0500
@@ -96,12 +96,12 @@
${TESTJAVA}${FS}bin${FS}jar cvf ${SPIDIR}${FS}tznp.jar -C ${SPIDIR}${FS}dest .
# get the platform default locales
-PLATDEF=`${TESTJAVA}${FS}bin${FS}java -classpath ${TESTCLASSES} LocaleProviders getPlatformLocale display`
+PLATDEF=`${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath ${TESTCLASSES} LocaleProviders getPlatformLocale display`
DEFLANG=`echo ${PLATDEF} | sed -e "s/,.*//"`
DEFCTRY=`echo ${PLATDEF} | sed -e "s/.*,//"`
echo "DEFLANG=${DEFLANG}"
echo "DEFCTRY=${DEFCTRY}"
-PLATDEF=`${TESTJAVA}${FS}bin${FS}java -classpath ${TESTCLASSES} LocaleProviders getPlatformLocale format`
+PLATDEF=`${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath ${TESTCLASSES} LocaleProviders getPlatformLocale format`
DEFFMTLANG=`echo ${PLATDEF} | sed -e "s/,.*//"`
DEFFMTCTRY=`echo ${PLATDEF} | sed -e "s/.*,//"`
echo "DEFFMTLANG=${DEFFMTLANG}"
@@ -109,7 +109,7 @@
runTest()
{
- RUNCMD="${TESTJAVA}${FS}bin${FS}java -classpath ${TESTCLASSES} -Djava.locale.providers=$PREFLIST LocaleProviders $METHODNAME $PARAM1 $PARAM2 $PARAM3"
+ RUNCMD="${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath ${TESTCLASSES} -Djava.locale.providers=$PREFLIST LocaleProviders $METHODNAME $PARAM1 $PARAM2 $PARAM3"
echo ${RUNCMD}
${RUNCMD}
result=$?
--- a/jdk/test/java/util/PluggableLocale/ExecTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/PluggableLocale/ExecTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -119,9 +119,9 @@
# run
if [ "$3" = "true" ]
then
- RUNCMD="${TESTJAVA}${FS}bin${FS}java -Djava.ext.dirs=${EXTDIRS} $2 "
+ RUNCMD="${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Djava.ext.dirs=${EXTDIRS} $2 "
else
- RUNCMD="${TESTJAVA}${FS}bin${FS}java -classpath ${CLASSPATHARG} $2 "
+ RUNCMD="${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath ${CLASSPATHARG} $2 "
fi
echo ${RUNCMD}
--- a/jdk/test/java/util/PluggableLocale/GenericTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/PluggableLocale/GenericTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -41,6 +41,7 @@
com.bar.CurrencyNameProviderImpl2 currencyNP2 = new com.bar.CurrencyNameProviderImpl2();
com.bar.LocaleNameProviderImpl localeNP = new com.bar.LocaleNameProviderImpl();
com.bar.TimeZoneNameProviderImpl tzNP = new com.bar.TimeZoneNameProviderImpl();
+ com.bar.GenericTimeZoneNameProviderImpl tzGenNP = new com.bar.GenericTimeZoneNameProviderImpl();
com.bar.CalendarDataProviderImpl calDataP = new com.bar.CalendarDataProviderImpl();
com.bar.CalendarNameProviderImpl calNameP = new com.bar.CalendarNameProviderImpl();
@@ -73,6 +74,7 @@
expected.addAll(Arrays.asList(currencyNP2.getAvailableLocales()));
expected.addAll(Arrays.asList(localeNP.getAvailableLocales()));
expected.addAll(Arrays.asList(tzNP.getAvailableLocales()));
+ expected.addAll(Arrays.asList(tzGenNP.getAvailableLocales()));
expected.addAll(Arrays.asList(calDataP.getAvailableLocales()));
expected.addAll(Arrays.asList(calNameP.getAvailableLocales()));
if (!result.equals(expected)) {
--- a/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -40,6 +40,7 @@
TimeZoneNameProviderTest() {
test1();
test2();
+ test3();
aliasTest();
}
@@ -92,6 +93,7 @@
final String pattern = "z";
final Locale OSAKA = new Locale("ja", "JP", "osaka");
final Locale KYOTO = new Locale("ja", "JP", "kyoto");
+ final Locale GENERIC = new Locale("ja", "JP", "generic");
final String[] TIMEZONES = {
"GMT", "America/Los_Angeles", "SystemV/PST8",
@@ -157,6 +159,29 @@
}
}
+ void test3() {
+ final String[] TZNAMES = {
+ LATIME, PST, PST8PDT, US_PACIFIC,
+ TOKYOTIME, JST, JAPAN,
+ };
+ for (String tzname : TZNAMES) {
+ TimeZone tz = TimeZone.getTimeZone(tzname);
+ for (int style : new int[] { TimeZone.LONG, TimeZone.SHORT }) {
+ String osakaStd = tz.getDisplayName(false, style, OSAKA);
+ if (osakaStd != null) {
+ // No API for getting generic time zone names
+ String generic = TimeZoneNameUtility.retrieveGenericDisplayName(tzname,
+ style, GENERIC);
+ String expected = "Generic " + osakaStd;
+ if (!expected.equals(generic)) {
+ throw new RuntimeException("Wrong generic name: got=\"" + generic
+ + "\", expected=\"" + expected + "\"");
+ }
+ }
+ }
+ }
+ }
+
final String LATIME = "America/Los_Angeles";
final String PST = "PST";
final String PST8PDT = "PST8PDT";
--- a/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/PluggableLocale/TimeZoneNameProviderTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,6 @@
#!/bin/sh
#
# @test
-# @bug 4052440
+# @bug 4052440 8003267
# @summary TimeZoneNameProvider tests
# @run shell ExecTest.sh bar TimeZoneNameProviderTest true
Binary file jdk/test/java/util/PluggableLocale/barprovider.jar has changed
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/PluggableLocale/providersrc/GenericTimeZoneNameProviderImpl.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+/*
+ *
+ */
+
+package com.bar;
+
+import java.util.*;
+import java.util.spi.*;
+
+import com.foobar.Utils;
+
+/**
+ * Implementation class for getGenericDisplayName, which returns "Generic " + <standard name in OSAKA>.
+ */
+public class GenericTimeZoneNameProviderImpl extends TimeZoneNameProviderImpl {
+ static final Locale jaJPGeneric = new Locale("ja", "JP", "generic");
+ static final Locale OSAKA = new Locale("ja", "JP", "osaka");
+
+ static Locale[] avail = {
+ jaJPGeneric
+ };
+
+ @Override
+ public Locale[] getAvailableLocales() {
+ return avail;
+ }
+
+ @Override
+ public String getGenericDisplayName(String id, int style, Locale locale) {
+ if (!jaJPGeneric.equals(locale)) {
+ return null;
+ }
+ String std = super.getDisplayName(id, false, style, OSAKA);
+ return (std != null) ? "Generic " + std : null;
+ }
+}
--- a/jdk/test/java/util/PluggableLocale/providersrc/Makefile Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/PluggableLocale/providersrc/Makefile Mon Dec 17 08:30:06 2012 -0500
@@ -38,6 +38,7 @@
CurrencyNameProviderImpl.java \
CurrencyNameProviderImpl2.java \
TimeZoneNameProviderImpl.java \
+ GenericTimeZoneNameProviderImpl.java \
LocaleNameProviderImpl.java \
CalendarDataProviderImpl.java \
CalendarNameProviderImpl.java \
--- a/jdk/test/java/util/PluggableLocale/providersrc/java.util.spi.TimeZoneNameProvider Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/PluggableLocale/providersrc/java.util.spi.TimeZoneNameProvider Mon Dec 17 08:30:06 2012 -0500
@@ -5,3 +5,4 @@
# implementation class
#
com.bar.TimeZoneNameProviderImpl
+com.bar.GenericTimeZoneNameProviderImpl
--- a/jdk/test/java/util/ResourceBundle/Bug6299235Test.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/ResourceBundle/Bug6299235Test.sh Mon Dec 17 08:30:06 2012 -0500
@@ -75,7 +75,7 @@
cd ${TESTSRC}
-${TESTJAVA}/bin/java -cp ${TESTCLASSES} -Djava.ext.dirs=${NEW_EXT_DIR} Bug6299235Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -cp ${TESTCLASSES} -Djava.ext.dirs=${NEW_EXT_DIR} Bug6299235Test
if [ $? -ne 0 ]
then
--- a/jdk/test/java/util/ResourceBundle/Control/MissingResourceCauseTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/ResourceBundle/Control/MissingResourceCauseTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -50,7 +50,7 @@
: ${TESTCLASS:=.}
: ${TESTSRC:=.}
-${TESTJAVA}/bin/java -esa -cp ${TESTCLASS}${DEL}${TESTSRC} MissingResourceCauseTest
+${TESTJAVA}/bin/java ${TESTVMOPTS} -esa -cp ${TESTCLASS}${DEL}${TESTSRC} MissingResourceCauseTest
STATUS=$?
chmod 666 $UNREADABLE
rm -f $UNREADABLE
--- a/jdk/test/java/util/ServiceLoader/basic.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/ServiceLoader/basic.sh Mon Dec 17 08:30:06 2012 -0500
@@ -94,7 +94,7 @@
cp="$1"; shift
if [ -z "$cp" ]; then cp="$TESTCLASSES"; else cp="$TESTCLASSES$SEP$cp"; fi
vmargs="$1"; shift
- sh -xc "'$JAVA' -cp $cp $vmargs $T $*" 2>&1
+ sh -xc "'$JAVA' ${TESTVMOPTS} -cp $cp $vmargs $T $*" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
--- a/jdk/test/java/util/TimeZone/OldIDMappingTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/TimeZone/OldIDMappingTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -40,7 +40,7 @@
if [ x"$I" != x ]; then
D="-Dsun.timezone.ids.oldmapping=${I}"
fi
- if ! ${JAVA} ${D} -cp ${TESTCLASSES} OldIDMappingTest -new; then
+ if ! ${JAVA} ${D} ${TESTVMOPTS} -cp ${TESTCLASSES} OldIDMappingTest -new; then
STATUS=1
fi
done
@@ -51,7 +51,7 @@
if [ "x$I" != x ]; then
D="-Dsun.timezone.ids.oldmapping=${I}"
fi
- if ! ${JAVA} ${D} -cp ${TESTCLASSES} OldIDMappingTest -old; then
+ if ! ${JAVA} ${D} ${TESTVMOPTS} -cp ${TESTCLASSES} OldIDMappingTest -old; then
STATUS=1
fi
done
--- a/jdk/test/java/util/TimeZone/TimeZoneDatePermissionCheck.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/TimeZone/TimeZoneDatePermissionCheck.sh Mon Dec 17 08:30:06 2012 -0500
@@ -53,6 +53,6 @@
# run it with the security manager on, plus accesscontroller debugging
# will go into infinite recursion trying to get enough permissions for
# printing Date of failing certificate unless fix is applied.
-${TESTJAVA}/bin/java -Djava.security.manager \
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Djava.security.manager \
-Djava.security.debug=access,failure,policy \
-cp ${TESTCLASSES}/timezonedatetest.jar TimeZoneDatePermissionCheck
--- a/jdk/test/java/util/logging/CheckLockLocationTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/logging/CheckLockLocationTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,6 @@
* @run main/othervm CheckLockLocationTest
*/
import java.io.File;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.AccessDeniedException;
import java.nio.file.FileSystemException;
@@ -90,13 +89,12 @@
throw new RuntimeException("Test failed: should not have been able"
+ " to create FileHandler for " + "%t/" + NON_WRITABLE_DIR
+ "/log.log in non-writable directory.");
+ } catch (AccessDeniedException ex) {
+ // the right exception was thrown, so continue.
} catch (IOException ex) {
- // check for the right exception
- if (!(ex instanceof AccessDeniedException)) {
- throw new RuntimeException(
+ throw new RuntimeException(
"Test failed: Expected exception was not an "
+ "AccessDeniedException", ex);
- }
}
}
@@ -106,14 +104,11 @@
throw new RuntimeException("Test failed: should not have been able"
+ " to create FileHandler for " + "%t/" + NOT_A_DIR
+ "/log.log in non-directory.");
+ } catch (FileSystemException ex) {
+ // the right exception was thrown, so continue.
} catch (IOException ex) {
- // check for the right exception
- if (!(ex instanceof FileSystemException
- && ex.getMessage().contains("Not a directory"))) {
- throw new RuntimeException(
- "Test failed: Expected exception was not a "
- + "FileSystemException", ex);
- }
+ throw new RuntimeException("Test failed: exception thrown was not a "
+ + "FileSystemException", ex);
}
// Test 4: make sure we can't create a FileHandler in a non-existent dir
@@ -122,12 +117,11 @@
throw new RuntimeException("Test failed: should not have been able"
+ " to create FileHandler for " + "%t/" + NON_EXISTENT_DIR
+ "/log.log in a non-existent directory.");
+ } catch (NoSuchFileException ex) {
+ // the right exception was thrown, so continue.
} catch (IOException ex) {
- // check for the right exception
- if (!(ex instanceof NoSuchFileException)) {
- throw new RuntimeException("Test failed: Expected exception "
- + "was not a NoSuchFileException", ex);
- }
+ throw new RuntimeException("Test failed: Expected exception "
+ + "was not a NoSuchFileException", ex);
}
}
@@ -216,12 +210,14 @@
/*
* Recursively delete all files starting at specified file
*/
- private static void delete(File f) throws IOException {
+ private static void delete(File f) {
if (f != null && f.isDirectory()) {
for (File c : f.listFiles())
delete(c);
}
if (!f.delete())
- throw new FileNotFoundException("Failed to delete file: " + f);
- }
+ System.err.println(
+ "WARNING: unable to delete/cleanup writable test directory: "
+ + f );
+ }
}
--- a/jdk/test/java/util/logging/LoggingDeadlock4.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/logging/LoggingDeadlock4.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,11 +23,11 @@
/*
* @test
- * @bug 6977677
+ * @bug 6977677 8004928
* @summary Deadlock between LogManager.<clinit> and Logger.getLogger()
* @author Daniel D. Daugherty
- * @build LoggingDeadlock4
- * @run main/othervm/timeout=15 -Djava.awt.headless=true LoggingDeadlock4
+ * @compile -XDignore.symbol.file LoggingDeadlock4.java
+ * @run main/othervm/timeout=15 LoggingDeadlock4
*/
import java.util.concurrent.CountDownLatch;
@@ -39,21 +39,16 @@
private static CountDownLatch lmIsRunning = new CountDownLatch(1);
private static CountDownLatch logIsRunning = new CountDownLatch(1);
+ // Create a sun.util.logging.PlatformLogger$JavaLogger object
+ // that has to be redirected when the LogManager class
+ // is initialized. This can cause a deadlock between
+ // LogManager.<clinit> and Logger.getLogger().
+ private static final sun.util.logging.PlatformLogger log =
+ sun.util.logging.PlatformLogger.getLogger("java.util.logging");
+
public static void main(String[] args) {
System.out.println("main: LoggingDeadlock4 is starting.");
- // Loading the java.awt.Container class will create a
- // sun.util.logging.PlatformLogger$JavaLogger object
- // that has to be redirected when the LogManager class
- // is initialized. This can cause a deadlock between
- // LogManager.<clinit> and Logger.getLogger().
- try {
- Class.forName("java.awt.Container");
- } catch (ClassNotFoundException cnfe) {
- throw new RuntimeException("Test failed: could not load"
- + " java.awt.Container." + cnfe);
- }
-
Thread lmThread = new Thread("LogManagerThread") {
public void run() {
// let main know LogManagerThread is running
--- a/jdk/test/java/util/prefs/CheckUserPrefsStorage.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/prefs/CheckUserPrefsStorage.sh Mon Dec 17 08:30:06 2012 -0500
@@ -50,14 +50,14 @@
esac
# run CheckUserPrefFirst - creates and stores a user pref
-${TESTJAVA}${FS}bin${FS}java -cp ${TESTCLASSES} -Djava.util.prefs.userRoot=. CheckUserPrefFirst
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp ${TESTCLASSES} -Djava.util.prefs.userRoot=. CheckUserPrefFirst
result=$?
if [ "$result" -ne "0" ]; then
exit 1
fi
# run CheckUserPrefLater - Looks for the stored pref
-${TESTJAVA}${FS}bin${FS}java -cp ${TESTCLASSES} -Djava.util.prefs.userRoot=. CheckUserPrefLater
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -cp ${TESTCLASSES} -Djava.util.prefs.userRoot=. CheckUserPrefLater
result=$?
if [ "$result" -ne "0" ]; then
exit 1
--- a/jdk/test/java/util/prefs/PrefsSpi.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/prefs/PrefsSpi.sh Mon Dec 17 08:30:06 2012 -0500
@@ -87,17 +87,17 @@
case "`uname`" in Windows*|CYGWIN* ) CPS=';';; *) CPS=':';; esac
-Sys "$java" "-cp" "$TESTCLASSES${CPS}extDir/PrefsSpi.jar" \
+Sys "$java" "${TESTVMOPTS}" "-cp" "$TESTCLASSES${CPS}extDir/PrefsSpi.jar" \
-Djava.util.prefs.PreferencesFactory=StubPreferencesFactory \
-Djava.util.prefs.userRoot=. \
PrefsSpi "StubPreferences"
-Sys "$java" "-cp" "$TESTCLASSES" \
+Sys "$java" "${TESTVMOPTS}" "-cp" "$TESTCLASSES" \
-Djava.util.prefs.userRoot=. \
PrefsSpi "java.util.prefs.*"
-Sys "$java" "-cp" "$TESTCLASSES${CPS}extDir/PrefsSpi.jar" \
+Sys "$java" "${TESTVMOPTS}" "-cp" "$TESTCLASSES${CPS}extDir/PrefsSpi.jar" \
-Djava.util.prefs.userRoot=. \
PrefsSpi "StubPreferences"
-Sys "$java" "-cp" "$TESTCLASSES" "-Djava.ext.dirs=extDir" \
+Sys "$java" "${TESTVMOPTS}" "-cp" "$TESTCLASSES" "-Djava.ext.dirs=extDir" \
-Djava.util.prefs.userRoot=. \
PrefsSpi "StubPreferences"
--- a/jdk/test/java/util/spi/ResourceBundleControlProvider/UserDefaultControlTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/spi/ResourceBundleControlProvider/UserDefaultControlTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -21,4 +21,5 @@
# questions.
#
-${TESTJAVA}/bin/java -Djava.ext.dirs=${TESTSRC} -cp ${TESTCLASSES} UserDefaultControlTest
\ No newline at end of file
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Djava.ext.dirs=${TESTSRC} -cp ${TESTCLASSES} UserDefaultControlTest
+
--- a/jdk/test/java/util/zip/3GBZipFiles.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/zip/3GBZipFiles.sh Mon Dec 17 08:30:06 2012 -0500
@@ -1,4 +1,3 @@
-#
# Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
@@ -83,7 +82,7 @@
huge-*) filesize_="$hugeSize" ;;
tiny-*) filesize_="$tinySize" ;;
esac
- sys "$JAVA" "-cp" "$TESTCLASSES" "FileBuilder" \
+ sys "$JAVA" ${TESTVMOPTS} "-cp" "$TESTCLASSES" "FileBuilder" \
"$filetype_" "$filename_" "$filesize_"
}
--- a/jdk/test/java/util/zip/ZipFile/deletetempjar.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/java/util/zip/ZipFile/deletetempjar.sh Mon Dec 17 08:30:06 2012 -0500
@@ -30,7 +30,7 @@
TESTCLASSES=.
fi
-tmpfile=`$TESTJAVA/bin/java -classpath $TESTCLASSES DeleteTempJar`
+tmpfile=`$TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES DeleteTempJar`
rc=$?
if [ $rc != 0 ]; then
echo Unexpected failure with exit status $rc
--- a/jdk/test/javax/crypto/SecretKeyFactory/FailOverTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/javax/crypto/SecretKeyFactory/FailOverTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -82,6 +82,7 @@
fi
${TESTJAVA}${FS}bin${FS}java \
+ ${TESTVMOPTS} \
-classpath "${TESTSRC}${FS}P1.jar${PS}${TESTSRC}${FS}P2.jar${PS}." \
FailOverTest
result=$?
--- a/jdk/test/javax/print/applet/AppletPrintLookup.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/javax/print/applet/AppletPrintLookup.sh Mon Dec 17 08:30:06 2012 -0500
@@ -82,7 +82,7 @@
${TESTJAVA}${SEP}bin${SEP}appletviewer ${TESTCLASSES}${SEP}AppletPrintLookup.html &
cd ${TESTCLASSES}
-${TESTJAVA}${SEP}bin${SEP}java YesNo
+${TESTJAVA}${SEP}bin${SEP}java ${TESTVMOPTS} YesNo
if [ $? -ne 0 ]
then
echo "Test fails!"
--- a/jdk/test/javax/rmi/ssl/SocketFactoryTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/javax/rmi/ssl/SocketFactoryTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,8 +26,7 @@
* @bug 4932837 6582235
* @summary Test SslRMI[Client|Server]SocketFactory equals() and hashCode().
* @author Daniel Fuchs
- * @run clean SocketFactoryTest
- * @run build SocketFactoryTest
+ *
* @run main SocketFactoryTest
*/
--- a/jdk/test/javax/script/ProviderTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/javax/script/ProviderTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -45,6 +45,6 @@
echo "Running test ..."
-$JAVA -classpath \
+$JAVA ${TESTVMOPTS} -classpath \
"${TESTCLASSES}${PS}${TESTCLASSES}/dummy.jar" \
ProviderTest
--- a/jdk/test/javax/security/auth/Subject/doAs/Test.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/javax/security/auth/Subject/doAs/Test.sh Mon Dec 17 08:30:06 2012 -0500
@@ -71,7 +71,7 @@
cd ${TESTSRC}${FS}
cd $WD
echo $WD
-${TESTJAVA}${FS}bin${FS}java -classpath "${TESTCLASSES}${FS}" \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -classpath "${TESTCLASSES}${FS}" \
-Djava.security.manager \
-Djava.security.policy=${TESTSRC}${FS}policy \
Test
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/JTree/8003830/bug8003830.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.awt.EventQueue;
+import java.awt.Rectangle;
+import java.awt.event.ActionEvent;
+import javax.swing.JTree;
+import javax.swing.plaf.basic.BasicTreeUI;
+import javax.swing.tree.TreePath;
+
+
+/* Originally reported as NetBeans bug 222081.
+ *
+ * @test
+ * @bug 8003830
+ * @summary NullPointerException in BasicTreeUI.Actions when getPathBounds returns null
+ * @author Jaroslav Tulach
+ * @run main bug8003830
+ */
+
+public class bug8003830 implements Runnable {
+ public static void main(String[] args) throws Exception {
+ EventQueue.invokeAndWait(new bug8003830());
+ }
+ @Override
+ public void run() {
+ testNPEAtActionsPage();
+ }
+
+ public void testNPEAtActionsPage() {
+ JTree tree = new JTree();
+ BasicTreeUI ui = new NullReturningTreeUI();
+ tree.setUI(ui);
+ BasicTreeUI.TreePageAction tpa = ui.new TreePageAction(0, "down");
+ tpa.actionPerformed(new ActionEvent(tree, 0, ""));
+ }
+
+ private static final class NullReturningTreeUI extends BasicTreeUI {
+ @Override
+ public Rectangle getPathBounds(JTree tree, TreePath path) {
+ // the method can return null and callers have to be ready for
+ // that. Simulate the case by returning null for an unknown reason.
+ return null;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/dnd/7171812/JListWithScroll.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import javax.swing.*;
+import java.awt.*;
+import java.awt.dnd.Autoscroll;
+
+public class JListWithScroll<E> extends JList<E> implements Autoscroll {
+ private Insets scrollInsets;
+
+ public JListWithScroll(E[] listData) {
+ super(listData);
+ scrollInsets = new Insets(50, 50, 50, 50);
+ }
+
+ @Override
+ public Insets getAutoscrollInsets() {
+ return scrollInsets;
+ }
+
+ @Override
+ public void autoscroll(Point cursorLoc) {
+ JViewport viewport = getViewport();
+
+ if (viewport == null) {
+ return;
+ }
+
+ Point viewPos = viewport.getViewPosition();
+ int viewHeight = viewport.getExtentSize().height;
+ int viewWidth = viewport.getExtentSize().width;
+
+ if ((cursorLoc.y - viewPos.y) < scrollInsets.top) {
+ viewport.setViewPosition(new Point(viewPos.x, Math.max(viewPos.y - scrollInsets.top, 0)));
+ } else if (((viewPos.y + viewHeight) - cursorLoc.y) < scrollInsets.bottom) {
+ viewport.setViewPosition(
+ new Point(viewPos.x, Math.min(viewPos.y + scrollInsets.bottom, this.getHeight() - viewHeight))
+ );
+ } else if ((cursorLoc.x - viewPos.x) < scrollInsets.left) {
+ viewport.setViewPosition(new Point(Math.max(viewPos.x - scrollInsets.left, 0), viewPos.y));
+ } else if (((viewPos.x + viewWidth) - cursorLoc.x) < scrollInsets.right) {
+ viewport.setViewPosition(
+ new Point(Math.min(viewPos.x + scrollInsets.right, this.getWidth() - viewWidth), viewPos.y)
+ );
+ }
+
+ }
+
+ public JViewport getViewport() {
+ Component curComp = this;
+
+ while (!(curComp instanceof JViewport) && (curComp != null)) {
+ curComp = curComp.getParent();
+ }
+ if(curComp instanceof JViewport) {
+ return (JViewport) curComp;
+ } else {
+ return null;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/javax/swing/dnd/7171812/bug7171812.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ @bug 7171812
+ @summary [macosx] Views keep scrolling back to the drag position after DnD
+ @author Alexander Zuev
+ @run main bug7171812
+ */
+
+import sun.awt.SunToolkit;
+
+import java.awt.*;
+import java.awt.dnd.*;
+import java.awt.event.InputEvent;
+import javax.swing.*;
+
+public class bug7171812 {
+ static JFrame mainFrame;
+ static String listData[];
+ static JListWithScroll<String> list;
+ static JScrollPane scrollPane;
+
+ /**
+ * @param args the command line arguments
+ */
+ public static void main(String[] args) throws Exception{
+ SunToolkit toolkit = (SunToolkit) Toolkit.getDefaultToolkit();
+
+ SwingUtilities.invokeAndWait(new Runnable() {
+ @Override
+ public void run() {
+ setupGUI();
+ }
+ });
+ toolkit.realSync();
+
+ Robot robot = new Robot();
+ robot.setAutoDelay(10);
+ robot.mouseMove(scrollPane.getLocationOnScreen().x + 5, scrollPane.getLocationOnScreen().y + 5);
+ robot.mousePress(InputEvent.BUTTON1_MASK);
+ for(int offset = 5; offset < scrollPane.getHeight()-20; offset++) {
+ robot.mouseMove(scrollPane.getLocationOnScreen().x+5, scrollPane.getLocationOnScreen().y+offset);
+ }
+ for(int offset = 5; offset < 195; offset++) {
+ robot.mouseMove(scrollPane.getLocationOnScreen().x+offset, scrollPane.getLocationOnScreen().y+scrollPane.getHeight()-20);
+ }
+ robot.mouseRelease(InputEvent.BUTTON1_MASK);
+ try {
+ SwingUtilities.invokeAndWait(new Runnable() {
+ @Override
+ public void run() {
+ if(scrollPane.getViewport().getViewPosition().getY() < 30) {
+ throw new RuntimeException("Incorrect view position.");
+ };
+ }
+ });
+ } catch (java.lang.reflect.InvocationTargetException ite) {
+ throw new RuntimeException("Test failed, scroll on drag doesn't work!");
+ }
+ }
+
+ public static void setupGUI() {
+ listData = new String[100];
+ for (int i=0; i<100; i++) {
+ listData[i] = "Long Line With Item "+i;
+ }
+ mainFrame = new JFrame("Rest frame");
+ mainFrame.setSize(300, 500);
+ mainFrame.setLayout(new BorderLayout());
+ list = new JListWithScroll(listData);
+ list.setDragEnabled(true);
+ list.setAutoscrolls(true);
+ final DropTarget dropTarget = new DropTarget(list, DnDConstants.ACTION_MOVE, new DropTargetListener() {
+ @Override
+ public void dragEnter(DropTargetDragEvent dtde) {
+ dragOver(dtde);
+ }
+
+ @Override
+ public void dragOver(DropTargetDragEvent dtde) {
+ dtde.acceptDrag(DnDConstants.ACTION_MOVE);
+ }
+
+ @Override
+ public void dropActionChanged(DropTargetDragEvent dtde) {
+ }
+
+ @Override
+ public void dragExit(DropTargetEvent dte) {
+ }
+
+ @Override
+ public void drop(DropTargetDropEvent dtde) {
+ }
+ }, true);
+ scrollPane = new JScrollPane(list);
+ mainFrame.add(scrollPane, BorderLayout.CENTER);
+ mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+ mainFrame.setLocation(100, 100);
+ mainFrame.setVisible(true);
+ }
+}
--- a/jdk/test/javax/swing/text/CSSBorder/6796710/bug6796710.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/javax/swing/text/CSSBorder/6796710/bug6796710.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 6796710
+ * @bug 6796710 7124242
* @summary Html content in JEditorPane is overlapping on swing components while resizing the application.
* @library ../../../regtesthelpers
* @build Util
@@ -31,11 +31,10 @@
@run main bug6796710
*/
-import sun.awt.SunToolkit;
-
-import javax.swing.*;
import java.awt.*;
import java.awt.image.BufferedImage;
+import javax.swing.*;
+import sun.awt.SunToolkit;
public class bug6796710 {
// The page is inlined because we want to be sure that the JEditorPane filled synchronously
@@ -68,9 +67,12 @@
robot = new Robot();
SwingUtilities.invokeAndWait(new Runnable() {
+ @Override
public void run() {
frame = new JFrame();
+ frame.setUndecorated(true);
+
pnBottom = new JPanel();
pnBottom.add(new JLabel("Some label"));
pnBottom.add(new JButton("A button"));
@@ -95,9 +97,13 @@
((SunToolkit) SunToolkit.getDefaultToolkit()).realSync();
+ // This delay is needed on MacOSX, realSync is not enough
+ Thread.sleep(1000);
+
BufferedImage bufferedImage = getPnBottomImage();
SwingUtilities.invokeAndWait(new Runnable() {
+ @Override
public void run() {
frame.setSize(400, 150);
}
--- a/jdk/test/lib/security/java.policy/Ext_AllPolicy.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/lib/security/java.policy/Ext_AllPolicy.sh Mon Dec 17 08:30:06 2012 -0500
@@ -77,7 +77,7 @@
${TESTJAVA}${FS}bin${FS}jar -cvf Ext_AllPolicy.jar Ext_AllPolicy.class
rm Ext_AllPolicy.class
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-Djava.security.manager -Djava.ext.dirs="${TESTCLASSES}" Ext_AllPolicy
exit $?
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/sun/java2d/OpenGL/CustomCompositeTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7124347
+ * @summary Verifies that rendering with XOR composite, and arbitrary
+ * custom composite does not cause internal errors.
+ *
+ * @run main/othervm -Dsun.java2d.opengl=True CustomCompositeTest
+ */
+
+import java.awt.AWTException;
+import java.awt.Color;
+import java.awt.Composite;
+import java.awt.CompositeContext;
+import java.awt.Dimension;
+import java.awt.GradientPaint;
+import java.awt.Graphics;
+import java.awt.Graphics2D;
+import java.awt.GraphicsConfiguration;
+import java.awt.GraphicsEnvironment;
+import java.awt.ImageCapabilities;
+import java.awt.RenderingHints;
+import java.awt.image.BufferedImage;
+import java.awt.image.ColorModel;
+import java.awt.image.DataBufferInt;
+import java.awt.image.Raster;
+import java.awt.image.SinglePixelPackedSampleModel;
+import java.awt.image.VolatileImage;
+import java.awt.image.WritableRaster;
+import java.util.concurrent.CountDownLatch;
+import javax.swing.JComponent;
+import javax.swing.JFrame;
+import javax.swing.SwingUtilities;
+
+public class CustomCompositeTest {
+
+ private static JFrame frame;
+ private static CountDownLatch paintLatch;
+ private static Throwable paintError;
+
+ public static void main(String[] args) {
+
+ paintLatch = new CountDownLatch(1);
+ paintError = null;
+
+ SwingUtilities.invokeLater(new Runnable() {
+ public void run() {
+ initGUI();
+ }
+ });
+
+ try {
+ paintLatch.await();
+ } catch (InterruptedException e) {
+ };
+ System.out.println("Paint is done!");
+ if (paintError != null) {
+ frame.dispose();
+ throw new RuntimeException("Test FAILED.", paintError);
+ }
+
+ System.out.println("Phase 1: PASSED.");
+
+ // now resize the frame in order to cause a repaint with accelerated
+ // source images.
+ paintError = null;
+ paintLatch = new CountDownLatch(1);
+
+ SwingUtilities.invokeLater(new Runnable() {
+ @Override
+ public void run() {
+ Dimension size = frame.getSize();
+ size.width += 50;
+ size.height += 50;
+
+ frame.setSize(size);
+ }
+ });
+
+ try {
+ paintLatch.await();
+ } catch (InterruptedException e) {
+ };
+ if (paintError != null) {
+ frame.dispose();
+ throw new RuntimeException("Resize test FAILED.", paintError);
+ }
+ frame.dispose();
+ System.out.println("Phase 2: PASSED.");
+
+ GraphicsEnvironment env = GraphicsEnvironment.getLocalGraphicsEnvironment();
+ GraphicsConfiguration cfg = env.getDefaultScreenDevice().getDefaultConfiguration();
+ // test rendering to accelerated volatile image
+ testVolatileImage(cfg, true);
+ System.out.println("Phase 3: PASSED.");
+
+ // test rendering to unaccelerated volatile image
+ testVolatileImage(cfg, false);
+ System.out.println("Phase 4: PASSED.");
+ }
+
+ private static void testVolatileImage(GraphicsConfiguration cfg,
+ boolean accelerated)
+ {
+ VolatileImage dst = null;
+ try {
+ dst = cfg.createCompatibleVolatileImage(640, 480,
+ new ImageCapabilities(accelerated));
+ } catch (AWTException e) {
+ System.out.println("Unable to create volatile image, skipping the test.");
+ return;
+ }
+ renderToVolatileImage(dst);
+ }
+
+ private static void renderToVolatileImage(VolatileImage dst) {
+ Graphics2D g = dst.createGraphics();
+ do {
+ System.out.println("Render to volatile image..");
+ try {
+ MyComp.renderTest(g, dst.getHeight(), dst.getHeight());
+ } catch (Throwable e) {
+ throw new RuntimeException("Test FAILED.", e);
+ }
+ } while (dst.contentsLost());
+ System.out.println("Done.");
+ }
+
+ private static void initGUI() {
+ frame = new JFrame("Silly composite");
+ frame.getContentPane().add(new MyComp());
+ frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+ frame.pack();
+ frame.setVisible(true);
+ }
+
+ private static class MyComp extends JComponent {
+
+ private static BufferedImage theImage;
+
+ public MyComp() {
+ }
+
+ private static BufferedImage getTestImage() {
+ if (theImage == null) {
+ theImage = new BufferedImage(256, 256, BufferedImage.TYPE_INT_ARGB);
+ Graphics2D g2d = theImage.createGraphics();
+ g2d.setColor(Color.red);
+ g2d.fillRect(0, 0, 256, 256);
+
+ g2d.setPaint(new GradientPaint(0, 0, Color.red, 256, 256, Color.blue));
+ g2d.fillRect(0, 100, 256, 256);
+ g2d.dispose();
+ }
+ return theImage;
+ }
+
+ public Dimension getPreferredSize() {
+ return new Dimension(640, 375);
+ }
+
+ public void paintComponent(Graphics g) {
+
+
+ Graphics2D g2d = (Graphics2D) g;
+ try {
+ renderTest(g2d, getWidth(), getHeight());
+ } catch (Throwable e) {
+ paintError = e;
+ }
+ if (paintLatch != null) {
+ paintLatch.countDown();
+ }
+ }
+
+ public static void renderTest(Graphics2D g2d, int w, int h) {
+ g2d.setColor(Color.yellow);
+ g2d.fillRect(0, 0, w, h);
+
+ BufferedImage image = getTestImage();
+ // draw original image
+ g2d.drawRenderedImage(image, null);
+
+ // draw image with custom composite
+ g2d.translate(175, 25);
+ Composite currentComposite = g2d.getComposite();
+ g2d.setComposite(new TestComposite());
+ g2d.drawRenderedImage(image, null);
+ g2d.setComposite(currentComposite);
+
+ // draw image with XOR
+ g2d.translate(175, 25);
+ g2d.setXORMode(Color.red);
+ g2d.drawRenderedImage(image, null);
+
+
+ System.out.println("Painting is done...");
+ }
+ }
+
+ // A silly custom Composite to demonstrate the problem - just inverts the RGB
+ private static class TestComposite implements Composite {
+
+ public CompositeContext createContext(ColorModel srcColorModel, ColorModel dstColorModel, RenderingHints hints) {
+ return new TestCompositeContext();
+ }
+ }
+
+ private static class TestCompositeContext implements CompositeContext {
+
+ public void dispose() {
+ }
+
+ public void compose(Raster src, Raster dstIn, WritableRaster dstOut) {
+ int w = src.getWidth();
+ int h = src.getHeight();
+
+ DataBufferInt srcDB = (DataBufferInt) src.getDataBuffer();
+ DataBufferInt dstOutDB = (DataBufferInt) dstOut.getDataBuffer();
+ int srcRGB[] = srcDB.getBankData()[0];
+ int dstOutRGB[] = dstOutDB.getBankData()[0];
+ int srcOffset = srcDB.getOffset();
+ int dstOutOffset = dstOutDB.getOffset();
+ int srcScanStride = ((SinglePixelPackedSampleModel) src.getSampleModel()).getScanlineStride();
+ int dstOutScanStride = ((SinglePixelPackedSampleModel) dstOut.getSampleModel()).getScanlineStride();
+ int srcAdjust = srcScanStride - w;
+ int dstOutAdjust = dstOutScanStride - w;
+
+ int si = srcOffset;
+ int doi = dstOutOffset;
+
+ for (int i = 0; i < h; i++) {
+ for (int j = 0; j < w; j++) {
+ dstOutRGB[doi] = srcRGB[si] ^ 0x00ffffff;
+ si++;
+ doi++;
+ }
+
+ si += srcAdjust;
+ doi += dstOutAdjust;
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/sun/java2d/cmm/ColorConvertOp/InvalidRenderIntentTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 7064516
+ * @summary Test verifies that an incorrect profile rendering intent
+ * does not cause a failure of the color conversion op.
+ * @run main InvalidRenderIntentTest
+ */
+
+import java.awt.color.CMMException;
+import java.awt.color.ColorSpace;
+import java.awt.color.ICC_ColorSpace;
+import java.awt.color.ICC_Profile;
+import java.awt.image.ColorConvertOp;
+import java.awt.image.BufferedImage;
+
+import static java.awt.color.ColorSpace.CS_sRGB;
+import static java.awt.image.BufferedImage.TYPE_3BYTE_BGR;
+
+public class InvalidRenderIntentTest {
+
+ public static void main(String[] args) {
+ ICC_Profile pSRGB = ICC_Profile.getInstance(CS_sRGB);
+
+ byte[] raw_data = pSRGB.getData();
+
+ setRenderingIntent(0x1000000, raw_data);
+
+ ICC_Profile p = ICC_Profile.getInstance(raw_data);
+
+ ICC_ColorSpace cs = new ICC_ColorSpace(p);
+
+ // perform test color conversion
+ ColorConvertOp op = new ColorConvertOp(cs,
+ ColorSpace.getInstance(CS_sRGB), null);
+ BufferedImage src = new BufferedImage(1, 1, TYPE_3BYTE_BGR);
+ BufferedImage dst = new BufferedImage(1, 1, TYPE_3BYTE_BGR);
+
+ try {
+ op.filter(src.getRaster(), dst.getRaster());
+ } catch (CMMException e) {
+ throw new RuntimeException("Test failed.", e);
+ }
+ System.out.println("Test passed.");
+ }
+
+ private static void setRenderingIntent(int intent, byte[] data) {
+ final int pos = ICC_Profile.icHdrRenderingIntent;
+
+ data[pos + 0] = (byte) (0xff & (intent >> 24));
+ data[pos + 1] = (byte) (0xff & (intent >> 16));
+ data[pos + 2] = (byte) (0xff & (intent >> 8));
+ data[pos + 3] = (byte) (0xff & (intent));
+ }
+}
--- a/jdk/test/sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/jvmstat/monitor/MonitoredVm/MonitorVmStartTerminate.sh Mon Dec 17 08:30:06 2012 -0500
@@ -41,4 +41,4 @@
JAVA="${TESTJAVA}/bin/java"
CP=${TESTJAVA}${FS}lib${FS}tools.jar${PS}${TESTCLASSES}
-${JAVA} -classpath ${CP} MonitorVmStartTerminate
+${JAVA} ${TESTVMOPTS} -classpath ${CP} MonitorVmStartTerminate
--- a/jdk/test/sun/management/jmxremote/bootstrap/CustomLauncherTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/management/jmxremote/bootstrap/CustomLauncherTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -150,7 +150,7 @@
done
# Start the manager - this should connect to VM
-${TESTJAVA}/bin/java -classpath ${TESTCLASSES}:${TESTJAVA}/lib/tools.jar \
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTCLASSES}:${TESTJAVA}/lib/tools.jar \
TestManager $pid $port true
if [ $? != 0 ]; then
echo "Test failed"
--- a/jdk/test/sun/management/jmxremote/bootstrap/LocalManagementTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/management/jmxremote/bootstrap/LocalManagementTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -43,8 +43,8 @@
rm -f ${outputfile}
# Start VM with given options
- echo "+ $JAVA $1 Test"
- $JAVA $1 TestApplication > ${outputfile}&
+ echo "+ $JAVA ${TESTVMOPTS} $1 Test"
+ $JAVA ${TESTVMOPTS} $1 TestApplication > ${outputfile}&
pid=$!
# Wait for managed VM to startup
@@ -64,7 +64,7 @@
done
# Start the manager - this should connect to VM
- sh -xc "$JAVA -classpath ${TESTCLASSES}:${TESTJAVA}/lib/tools.jar \
+ sh -xc "$JAVA ${TESTVMOPTS} -classpath ${TESTCLASSES}:${TESTJAVA}/lib/tools.jar \
TestManager $pid $port" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
@@ -112,7 +112,7 @@
# Test 4 - sanity check arguments to management-agent.jar
echo ' '
-sh -xc "${JAVA} -javaagent:${AGENT}=com.sun.management.jmxremote.port=7775,\
+sh -xc "${JAVA} ${TESTVMOPTS} -javaagent:${AGENT}=com.sun.management.jmxremote.port=7775,\
com.sun.management.jmxremote.authenticate=false,com.sun.management.jmxremote.ssl=false \
TestApplication -exit" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
--- a/jdk/test/sun/management/jmxremote/bootstrap/PasswordFilePermissionTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/management/jmxremote/bootstrap/PasswordFilePermissionTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -104,18 +104,18 @@
go() {
echo ''
- sh -xc "$JAVA $1 $2 $3 $4 $5 $6 $7 $8" 2>&1
+ sh -xc "$JAVA ${TESTVMOPTS} $1 $2 $3 $4 $5 $6 $7 $8" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
# Test 1 - password file is secure - VM should start
chmod 700 ${PASSWD}
-sh -xc "$JAVA $mp $pp Null" 2>&1
+sh -xc "$JAVA ${TESTVMOPTS} $mp $pp Null" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
# Test 2 - password file is not secure - VM should fail to start
chmod o+rx ${PASSWD}
-sh -xc "$JAVA $mp $pp Null" 2>&1
+sh -xc "$JAVA ${TESTVMOPTS} $mp $pp Null" 2>&1
if [ $? = 0 ]; then failures=`expr $failures + 1`; fi
# Reset the file permissions on the generated password file
--- a/jdk/test/sun/management/jmxremote/bootstrap/SSLConfigFilePermissionTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/management/jmxremote/bootstrap/SSLConfigFilePermissionTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -101,18 +101,18 @@
go() {
echo ''
- sh -xc "$JAVA $1 $2 $3 $4 $5 $6 $7 $8" 2>&1
+ sh -xc "$JAVA ${TESTVMOPTS} $1 $2 $3 $4 $5 $6 $7 $8" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
# Test 1 - SSL config file is secure - VM should start
chmod 700 ${SSL}
-sh -xc "$JAVA $mp $pp Dummy" 2>&1
+sh -xc "$JAVA ${TESTVMOPTS} $mp $pp Dummy" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
# Test 2 - SSL config file is not secure - VM should fail to start
chmod o+rx ${SSL}
-sh -xc "$JAVA $mp $pp Dummy" 2>&1
+sh -xc "$JAVA ${TESTVMOPTS} $mp $pp Dummy" 2>&1
if [ $? = 0 ]; then failures=`expr $failures + 1`; fi
# Reset the file permissions on the generated SSL config file
--- a/jdk/test/sun/management/jmxremote/startstop/JMXStartStopTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/management/jmxremote/startstop/JMXStartStopTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -61,7 +61,7 @@
}
_app_start(){
- ${TESTJAVA}/bin/java -server $* -cp ${_testclasses} JMXStartStopDoSomething >> ${_logname} 2>&1 &
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} $* -cp ${_testclasses} JMXStartStopDoSomething >> ${_logname} 2>&1 &
npid=`_get_pid`
if [ "${npid}" = "" ]
@@ -103,7 +103,7 @@
}
_testme(){
- ${TESTJAVA}/bin/java -cp ${_testclasses} JMXStartStopTest $*
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} -cp ${_testclasses} JMXStartStopTest $*
}
--- a/jdk/test/sun/misc/Cleaner/exitOnThrow.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/misc/Cleaner/exitOnThrow.sh Mon Dec 17 08:30:06 2012 -0500
@@ -39,7 +39,7 @@
TESTCLASSES=`pwd`
fi
-if $TESTJAVA/bin/java -cp $TESTCLASSES ExitOnThrow; then
+if $TESTJAVA/bin/java ${TESTVMOPTS} -cp $TESTCLASSES ExitOnThrow; then
echo Failed: VM exited normally
exit 1
else
--- a/jdk/test/sun/net/InetAddress/nameservice/dns/cname.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/InetAddress/nameservice/dns/cname.sh Mon Dec 17 08:30:06 2012 -0500
@@ -42,7 +42,7 @@
export CLASSPATH
JAVA="${TESTJAVA}/bin/java"
-sh -xc "$JAVA CanonicalName $HOST" 2>&1
+sh -xc "$JAVA ${TESTVMOPTS} CanonicalName $HOST" 2>&1
if [ $? != 0 ]; then
echo "DNS not configured or host doesn't resolve to CNAME record"
exit 0
@@ -52,7 +52,7 @@
go() {
echo ''
- sh -xc "$JAVA $1 Lookup $2" 2>&1
+ sh -xc "$JAVA ${TESTVMOPTS} $1 Lookup $2" 2>&1
if [ $? != 0 ]; then failures=`expr $failures + 1`; fi
}
--- a/jdk/test/sun/net/sdp/sanity.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/sdp/sanity.sh Mon Dec 17 08:30:06 2012 -0500
@@ -57,7 +57,7 @@
export CLASSPATH
# Probe for IP addresses plumbed to IB interfaces
-$JAVA -Djava.net.preferIPv4Stack=true ProbeIB ${IB_LINKS} > ${IB_ADDRS}
+$JAVA ${TESTVMOPTS} -Djava.net.preferIPv4Stack=true ProbeIB ${IB_LINKS} > ${IB_ADDRS}
# Create sdp.conf
SDPCONF=sdp.conf
@@ -70,4 +70,4 @@
done
# Sanity check
-$JAVA -Djava.net.preferIPv4Stack=true -Dcom.sun.sdp.conf=${SDPCONF} -Dcom.sun.sdp.debug Sanity
+$JAVA ${TESTVMOPTS} -Djava.net.preferIPv4Stack=true -Dcom.sun.sdp.conf=${SDPCONF} -Dcom.sun.sdp.debug Sanity
--- a/jdk/test/sun/net/www/MarkResetTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/www/MarkResetTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -52,4 +52,4 @@
# in this directory
cp ${TESTSRC}${FS}EncDec.doc .
-${TESTJAVA}${FS}bin${FS}java MarkResetTest
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} MarkResetTest
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/sun/net/www/MessageHeaderTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8003948
+ * @run main MessageHeaderTest
+ */
+import java.io.*;
+import sun.net.www.MessageHeader;
+
+public class MessageHeaderTest {
+ public static void main (String[] args) throws Exception {
+ for (int i=0; i<7; i++) {
+ ByteArrayInputStream bis = new ByteArrayInputStream(headers[i].getBytes());
+ MessageHeader h = new MessageHeader(bis);
+ String before = h.toString();
+ before = before.substring(before.indexOf('{'));
+ boolean result = h.filterNTLMResponses("WWW-Authenticate");
+ String after = h.toString();
+ after = after.substring(after.indexOf('{'));
+ if (!expected[i].equals(after)) {
+ throw new RuntimeException(Integer.toString(i) + " expected != after");
+ }
+ if (result != expectedResult[i]) {
+ throw new RuntimeException(Integer.toString(i) + " result != expectedResult");
+ }
+ }
+ }
+
+ static String expected[] = {
+ "{null: HTTP/1.1 200 Ok}{Foo: bar}{Bar: foo}{WWW-Authenticate: NTLM sdsds}",
+ "{null: HTTP/1.1 200 Ok}{Foo: bar}{Bar: foo}{WWW-Authenticate: }",
+ "{null: HTTP/1.1 200 Ok}{Foo: bar}{Bar: foo}{WWW-Authenticate: NTLM sdsds}",
+ "{null: HTTP/1.1 200 Ok}{Foo: bar}{Bar: foo}{WWW-Authenticate: NTLM sdsds}",
+ "{null: HTTP/1.1 200 Ok}{Foo: bar}{Bar: foo}{WWW-Authenticate: NTLM sdsds}{Bar: foo}",
+ "{null: HTTP/1.1 200 Ok}{WWW-Authenticate: Negotiate}{Foo: bar}{Bar: foo}{WWW-Authenticate: NTLM}{Bar: foo}{WWW-Authenticate: Kerberos}",
+ "{null: HTTP/1.1 200 Ok}{Foo: foo}{Bar: }{WWW-Authenticate: NTLM blob}{Bar: foo blob}"
+ };
+
+ static boolean[] expectedResult = {
+ false, false, true, true, true, false, false
+ };
+
+ static String[] headers = {
+ "HTTP/1.1 200 Ok\r\nFoo: bar\r\nBar: foo\r\nWWW-Authenticate: NTLM sdsds",
+ "HTTP/1.1 200 Ok\r\nFoo: bar\r\nBar: foo\r\nWWW-Authenticate:",
+ "HTTP/1.1 200 Ok\r\nFoo: bar\r\nBar: foo\r\nWWW-Authenticate: NTLM sdsds\r\nWWW-Authenticate: Negotiate",
+ "HTTP/1.1 200 Ok\r\nFoo: bar\r\nBar: foo\r\nWWW-Authenticate: NTLM sdsds\r\nWWW-Authenticate: Negotiate\r\nWWW-Authenticate: Kerberos",
+ "HTTP/1.1 200 Ok\r\nWWW-Authenticate: Negotiate\r\nFoo: bar\r\nBar: foo\r\nWWW-Authenticate: NTLM sdsds\r\nBar: foo\r\nWWW-Authenticate: Kerberos",
+ "HTTP/1.1 200 Ok\r\nWWW-Authenticate: Negotiate\r\nFoo: bar\r\nBar: foo\r\nWWW-Authenticate: NTLM\r\nBar: foo\r\nWWW-Authenticate: Kerberos",
+ "HTTP/1.1 200 Ok\r\nFoo: foo\r\nBar:\r\nWWW-Authenticate: NTLM blob\r\nBar: foo blob"
+ };
+}
--- a/jdk/test/sun/net/www/http/HttpClient/RetryPost.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/www/http/HttpClient/RetryPost.sh Mon Dec 17 08:30:06 2012 -0500
@@ -50,14 +50,14 @@
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}RetryPost.java
# run with no option specified. Should retry POST request.
-${TESTJAVA}${FS}bin${FS}java RetryPost
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} RetryPost
result=$?
if [ "$result" -ne "0" ]; then
exit 1
fi
# run with option specified. Should not retry POST request.
-${TESTJAVA}${FS}bin${FS}java -Dsun.net.http.retryPost=false RetryPost noRetry
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Dsun.net.http.retryPost=false RetryPost noRetry
result=$?
if [ "$result" -ne "0" ]; then
exit 1
--- a/jdk/test/sun/net/www/protocol/file/DirPermissionDenied.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/www/protocol/file/DirPermissionDenied.sh Mon Dec 17 08:30:06 2012 -0500
@@ -35,7 +35,7 @@
mkdir -p ${TESTDIR}
chmod 333 ${TESTDIR}
-$TESTJAVA/bin/java -classpath $TESTCLASSES DirPermissionDenied ${TESTDIR}
+$TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES DirPermissionDenied ${TESTDIR}
result=$?
# Add back read access for user, otherwise not removable on some systems
--- a/jdk/test/sun/net/www/protocol/jar/B5105410.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/www/protocol/jar/B5105410.sh Mon Dec 17 08:30:06 2012 -0500
@@ -51,5 +51,5 @@
cp ${TESTSRC}${FS}foo2.jar .
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}B5105410.java
-${TESTJAVA}${FS}bin${FS}java B5105410
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} B5105410
--- a/jdk/test/sun/net/www/protocol/jar/getcontenttype.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/www/protocol/jar/getcontenttype.sh Mon Dec 17 08:30:06 2012 -0500
@@ -33,5 +33,5 @@
if [ x"$TESTSRC" = x ]; then TESTSRC=.; fi
# now start the test
-${TESTJAVA}/bin/java -Djava.ext.dirs=$TESTSRC -cp $TESTCLASSES GetContentType
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Djava.ext.dirs=$TESTSRC -cp $TESTCLASSES GetContentType
--- a/jdk/test/sun/net/www/protocol/jar/jarbug/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/net/www/protocol/jar/jarbug/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -71,7 +71,7 @@
#
${TESTJAVA}${FS}bin${FS}javac -d ${DEST} ${TESTSRC}${FS}src${FS}test${FS}*.java
cd ${DEST}
-${TESTJAVA}${FS}bin${FS}java RunAllTests
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} RunAllTests
result=$?
if [ "$result" -ne "0" ]; then
exit 1
--- a/jdk/test/sun/rmi/log/ReliableLog/LogAlignmentTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/log/ReliableLog/LogAlignmentTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -22,8 +22,10 @@
*/
/* @test
- @bug 4094889
- @summary rmid can have a corrupted log
+ * @bug 4094889
+ * @summary rmid can have a corrupted log
+ *
+ * @run main LogAlignmentTest
*/
/* Fault: ReliableLog used RandomAccessFile.skipBytes() to seek past the end
--- a/jdk/test/sun/rmi/log/ReliableLog/SnapshotSize.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/log/ReliableLog/SnapshotSize.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,8 @@
* @bug 4319866
* @summary Verify that ReliableLog.snapshotSize() returns correct snapshot
* file size even if LogHandler doesn't flush.
+ *
+ * @run main SnapshotSize
*/
import java.io.ByteArrayOutputStream;
--- a/jdk/test/sun/rmi/rmic/RMIGenerator/RmicDefault.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/rmic/RMIGenerator/RmicDefault.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,6 @@
* @library ../../../../java/rmi/testlibrary
*
* @build StreamPipe
- * @build RmicDefault
* @run main RmicDefault
*/
--- a/jdk/test/sun/rmi/rmic/manifestClassPath/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/rmic/manifestClassPath/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -114,26 +114,26 @@
Success "$javac" -classpath "jars/A.jar" Main.java MainI.java
Success "$rmic" -classpath "jars/A.jar${PS}." Main
-Success "$java" -classpath "jars/A.jar${PS}." Main
+Success "$java" ${TESTVMOPTS} -classpath "jars/A.jar${PS}." Main
Sys rm -f Main.class MainI.class Main_Stub.class
Success "$javac" -classpath "jars/sub/B.zip" Main.java MainI.java
Success "$rmic" -classpath "jars/sub/B.zip${PS}." Main
-Success "$java" -classpath "jars/sub/B.zip${PS}." Main
+Success "$java" ${TESTVMOPTS} -classpath "jars/sub/B.zip${PS}." Main
#Sys rm -f Main.class MainI.class Main_Stub.class
Sys rm -f Main_Stub.class # javac -extdirs workaround
#Success "$javac" -extdirs "jars" -classpath None Main.java MainI.java
Success "$rmic" -extdirs "jars" -classpath . Main
-Success "$java" -Djava.ext.dirs="jars" -cp . Main
+Success "$java" ${TESTVMOPTS} -Djava.ext.dirs="jars" -cp . Main
Sys rm -f Main_Stub.class
#Success "$javac" -extdirs "jars/sub" -classpath None Main.java MainI.java
Success "$rmic" -extdirs "jars/sub" -classpath . Main
-Success "$java" -Djava.ext.dirs="jars/sub" -cp . Main
+Success "$java" ${TESTVMOPTS} -Djava.ext.dirs="jars/sub" -cp . Main
Cleanup
--- a/jdk/test/sun/rmi/rmic/minimizeWrapperInstances/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/rmic/minimizeWrapperInstances/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -41,13 +41,13 @@
set -ex
${TESTJAVA}/bin/rmic -classpath ${TESTCLASSES:-.} -d ${TESTCLASSES:-.} PImpl
-${TESTJAVA}/bin/java -classpath ${TESTCLASSES:-.} Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTCLASSES:-.} Test
${TESTJAVA}/bin/rmic -classpath ${TESTCLASSES:-.} -d ${TESTCLASSES:-.} -vcompat PImpl
-${TESTJAVA}/bin/java -classpath ${TESTCLASSES:-.} Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTCLASSES:-.} Test
${TESTJAVA}/bin/rmic -Xnew -classpath ${TESTCLASSES:-.} -d ${TESTCLASSES:-.} PImpl
-${TESTJAVA}/bin/java -classpath ${TESTCLASSES:-.} Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTCLASSES:-.} Test
${TESTJAVA}/bin/rmic -Xnew -classpath ${TESTCLASSES:-.} -d ${TESTCLASSES:-.} -vcompat PImpl
-${TESTJAVA}/bin/java -classpath ${TESTCLASSES:-.} Test
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTCLASSES:-.} Test
--- a/jdk/test/sun/rmi/rmic/newrmic/equivalence/run.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/rmic/newrmic/equivalence/run.sh Mon Dec 17 08:30:06 2012 -0500
@@ -26,22 +26,24 @@
# @summary This test verifies that the new implementation of rmic
# generates equivalent classes as the old implementation, for a set
# of sample input classes.
-# @library ../../../../../java/rmi/testlibrary
-# @build TestLibrary
# @author Peter Jones
#
-# @build AgentServerImpl
-# @build AppleImpl
-# @build AppleUserImpl
-# @build ComputeServerImpl
-# @build CountServerImpl
-# @build DayTimeServerImpl
-# @build G1Impl
-# @build MyObjectImpl
-# @build NotActivatableServerImpl
-# @build OrangeEchoImpl
-# @build OrangeImpl
-# @build ServerImpl
+# @library ../../../../../java/rmi/testlibrary
+#
+# @build TestLibrary
+# AgentServerImpl
+# AppleImpl
+# AppleUserImpl
+# ComputeServerImpl
+# CountServerImpl
+# DayTimeServerImpl
+# G1Impl
+# MyObjectImpl
+# NotActivatableServerImpl
+# OrangeEchoImpl
+# OrangeImpl
+# ServerImpl
+#
# @run shell run.sh
if [ "${TESTJAVA}" = "" ]
--- a/jdk/test/sun/rmi/rmic/oldjavacRemoved/sunToolsJavacMain.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/rmic/oldjavacRemoved/sunToolsJavacMain.sh Mon Dec 17 08:30:06 2012 -0500
@@ -37,7 +37,7 @@
set -x
-${TESTJAVA}/bin/java -classpath ${TESTJAVA}/lib/tools.jar sun.tools.javac.Main -d ${TESTCLASSES:-.} ${TESTSRC:-.}/Foo.java
+${TESTJAVA}/bin/java ${TESTVMOPTS} -classpath ${TESTJAVA}/lib/tools.jar sun.tools.javac.Main -d ${TESTCLASSES:-.} ${TESTSRC:-.}/Foo.java
result=$?
if [ $result -eq 0 ]
--- a/jdk/test/sun/rmi/runtime/Log/6409194/NoConsoleOutput.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/runtime/Log/6409194/NoConsoleOutput.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,8 +31,7 @@
* @author Peter Jones
*
* @library ../../../../../java/rmi/testlibrary
- * @build JavaVM
- * @build NoConsoleOutput
+ * @build TestLibrary JavaVM
* @run main/othervm NoConsoleOutput
*/
--- a/jdk/test/sun/rmi/runtime/Log/checkLogging/CheckLogStreams.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/runtime/Log/checkLogging/CheckLogStreams.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,11 +28,7 @@
* @author Laird Dornin
*
* @library ../../../../../java/rmi/testlibrary
- * @build TestLibrary
- * @build TestParams
- * @build TestFailedException
- * @build CheckLogging
- * @build CheckLogStreams
+ * @build TestLibrary CheckLogging
* @run main/othervm -Dsun.rmi.log.useOld=true CheckLogStreams
*/
--- a/jdk/test/sun/rmi/runtime/Log/checkLogging/CheckLogging.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/runtime/Log/checkLogging/CheckLogging.java Mon Dec 17 08:30:06 2012 -0500
@@ -29,9 +29,6 @@
*
* @library ../../../../../java/rmi/testlibrary
* @build TestLibrary
- * @build TestParams
- * @build TestFailedException
- * @build CheckLogging
* @run main/othervm CheckLogging
*/
--- a/jdk/test/sun/rmi/server/MarshalOutputStream/marshalForeignStub/MarshalForeignStub.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/server/MarshalOutputStream/marshalForeignStub/MarshalForeignStub.java Mon Dec 17 08:30:06 2012 -0500
@@ -31,11 +31,7 @@
* @author Ann Wollrath
*
* @library ../../../../../java/rmi/testlibrary
- * @build TestLibrary
- * @build TestFailedException
- * @build MarshalForeignStub
- * @build Receiver
- * @build MarshalForeignStub_Stub
+ * @build TestLibrary Receiver MarshalForeignStub_Stub
* @run main/othervm/policy=security.policy MarshalForeignStub
*/
--- a/jdk/test/sun/rmi/transport/proxy/EagerHttpFallback.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/transport/proxy/EagerHttpFallback.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,6 +25,7 @@
* @bug 4290727
* @summary Verify that ConnectException will trigger HTTP fallback if
* sun.rmi.transport.proxy.eagerHttpFallback system property is set.
+ *
* @library ../../../../java/rmi/testlibrary
* @build TestLibrary
* @run main/othervm EagerHttpFallback
--- a/jdk/test/sun/rmi/transport/tcp/DeadCachedConnection.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/transport/tcp/DeadCachedConnection.java Mon Dec 17 08:30:06 2012 -0500
@@ -25,9 +25,7 @@
* @bug 4094891
* @summary unable to retry call if cached connection to server is used
* @library ../../../../java/rmi/testlibrary
- * @build DeadCachedConnection
- * @build JavaVM
- * @build TestLibrary
+ * @build TestLibrary JavaVM
* @run main/othervm DeadCachedConnection
*/
--- a/jdk/test/sun/rmi/transport/tcp/blockAccept/BlockAcceptTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/transport/tcp/blockAccept/BlockAcceptTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,11 +27,8 @@
* @summary RMI blocks in HttpAwareServerSocket.accept() if you telnet to it
* @author Adrian Colley
*
- * @library ../../../../../java/rmi/testlibrary/
- * @build TestIface
- * @build TestImpl
- * @build TestImpl_Stub
- * @build BlockAcceptTest
+ * @library ../../../../../java/rmi/testlibrary
+ * @build TestIface TestImpl TestImpl_Stub
* @run main/othervm/policy=security.policy/timeout=60 BlockAcceptTest
*/
--- a/jdk/test/sun/rmi/transport/tcp/disableMultiplexing/DisableMultiplexing.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/rmi/transport/tcp/disableMultiplexing/DisableMultiplexing.java Mon Dec 17 08:30:06 2012 -0500
@@ -28,7 +28,6 @@
* on that port, rather than engage in the deprecated "multiplexing protocol".
* @author Peter Jones
*
- * @build DisableMultiplexing
* @build DisableMultiplexing_Stub
* @run main/othervm DisableMultiplexing
*/
--- a/jdk/test/sun/security/krb5/auto/DynamicKeytab.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/krb5/auto/DynamicKeytab.java Mon Dec 17 08:30:06 2012 -0500
@@ -110,11 +110,13 @@
throw new Exception("Should not success");
} catch (GSSException gsse) {
System.out.println(gsse);
- KrbException ke = (KrbException)gsse.getCause();
- if (ke.returnCode() != Krb5.KRB_AP_ERR_BADKEYVER) {
- throw new Exception("Not expected failure code: " +
- ke.returnCode());
- }
+ // Since 7197159, a different kvno is accepted, so this return code
+ // will never be thrown again.
+ //KrbException ke = (KrbException)gsse.getCause();
+ //if (ke.returnCode() != Krb5.KRB_AP_ERR_BADKEYVER) {
+ // throw new Exception("Not expected failure code: " +
+ // ke.returnCode());
+ //}
}
// Test 8: an empty KDC means revoke all
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/sun/security/krb5/auto/KeyPermissions.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8004488
+ * @summary wrong permissions checked in krb5
+ * @compile -XDignore.symbol.file KeyPermissions.java
+ * @run main/othervm KeyPermissions
+ */
+
+import java.security.AccessControlException;
+import java.security.Permission;
+import javax.security.auth.PrivateCredentialPermission;
+import sun.security.jgss.GSSUtil;
+
+public class KeyPermissions extends SecurityManager {
+
+ @Override
+ public void checkPermission(Permission perm) {
+ if (perm instanceof PrivateCredentialPermission) {
+ if (!perm.getName().startsWith("javax.security.auth.kerberos.")) {
+ throw new AccessControlException(
+ "I don't like this", perm);
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setSecurityManager(new KeyPermissions());
+ new OneKDC(null).writeJAASConf();
+ Context s = Context.fromJAAS("server");
+ s.startAsServer(GSSUtil.GSS_KRB5_MECH_OID);
+ }
+}
+
--- a/jdk/test/sun/security/krb5/auto/KeyTabCompat.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/krb5/auto/KeyTabCompat.java Mon Dec 17 08:30:06 2012 -0500
@@ -24,6 +24,7 @@
/*
* @test
* @bug 6894072
+ * @bug 8004488
* @compile -XDignore.symbol.file KeyTabCompat.java
* @run main/othervm KeyTabCompat
* @summary always refresh keytab
@@ -70,21 +71,8 @@
s.startAsServer(GSSUtil.GSS_KRB5_MECH_OID);
s.status();
- if (s.s().getPrivateCredentials(KerberosKey.class).size() != 1) {
- throw new Exception("There should be one KerberosKey");
+ if (s.s().getPrivateCredentials(KerberosKey.class).size() != 0) {
+ throw new Exception("There should be no KerberosKey");
}
-
- Thread.sleep(2000); // make sure ktab timestamp is different
-
- kdc.addPrincipal(OneKDC.SERVER, "pass2".toCharArray());
- kdc.writeKtab(OneKDC.KTAB);
-
- Context.handshake(c, s);
- s.status();
-
- if (s.s().getPrivateCredentials(KerberosKey.class).size() != 1) {
- throw new Exception("There should be only one KerberosKey");
- }
-
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/sun/security/krb5/auto/KvnoNA.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7197159
+ * @compile -XDignore.symbol.file KvnoNA.java
+ * @run main/othervm KvnoNA
+ * @summary accept different kvno if there is no match
+ */
+
+import org.ietf.jgss.GSSException;
+import sun.security.jgss.GSSUtil;
+import sun.security.krb5.KrbException;
+import sun.security.krb5.PrincipalName;
+import sun.security.krb5.internal.ktab.KeyTab;
+import sun.security.krb5.internal.Krb5;
+
+public class KvnoNA {
+
+ public static void main(String[] args)
+ throws Exception {
+
+ OneKDC kdc = new OneKDC(null);
+ kdc.writeJAASConf();
+
+        // In the KDC, the kvno is 2
+ char[] pass = "pass2".toCharArray();
+ kdc.addPrincipal(OneKDC.SERVER, pass);
+
+        // In the ktab, the kvno is 1 or 3; the kvno 3 entry has the same password
+ KeyTab ktab = KeyTab.create(OneKDC.KTAB);
+ PrincipalName p = new PrincipalName(
+ OneKDC.SERVER+"@"+OneKDC.REALM, PrincipalName.KRB_NT_SRV_HST);
+ ktab.addEntry(p, "pass1".toCharArray(), 1, true);
+ ktab.addEntry(p, "pass2".toCharArray(), 3, true);
+ ktab.save();
+
+ Context c, s;
+
+ c = Context.fromUserPass("dummy", "bogus".toCharArray(), false);
+ s = Context.fromJAAS("server");
+
+ c.startAsClient(OneKDC.SERVER, GSSUtil.GSS_KRB5_MECH_OID);
+ s.startAsServer(GSSUtil.GSS_KRB5_MECH_OID);
+
+ Context.handshake(c, s);
+
+ s.dispose();
+ c.dispose();
+ }
+}
--- a/jdk/test/sun/security/krb5/auto/MoreKvno.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/krb5/auto/MoreKvno.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,8 +23,7 @@
/*
* @test
- * @bug 6893158
- * @bug 6907425
+ * @bug 6893158 6907425 7197159
* @run main/othervm MoreKvno
* @summary AP_REQ check should use key version number
*/
@@ -69,11 +68,13 @@
go(OneKDC.SERVER, "com.sun.security.jgss.krb5.accept", pass);
throw new Exception("This test should fail");
} catch (GSSException gsse) {
- KrbException ke = (KrbException)gsse.getCause();
- if (ke.returnCode() != Krb5.KRB_AP_ERR_BADKEYVER) {
- throw new Exception("Not expected failure code: " +
- ke.returnCode());
- }
+            // Since 7197159, a different kvno is accepted, so this return
+            // code will never be thrown again.
+ //KrbException ke = (KrbException)gsse.getCause();
+ //if (ke.returnCode() != Krb5.KRB_AP_ERR_BADKEYVER) {
+ // throw new Exception("Not expected failure code: " +
+ // ke.returnCode());
+ //}
}
}
--- a/jdk/test/sun/security/krb5/runNameEquals.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/krb5/runNameEquals.sh Mon Dec 17 08:30:06 2012 -0500
@@ -81,7 +81,7 @@
if [ "${NATIVE}" = "true" ] ; then
echo "Testing native provider"
- ${TESTJAVA}${FILESEP}bin${FILESEP}java \
+ ${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES} \
-Dsun.security.jgss.native=true \
${TEST}
@@ -92,7 +92,7 @@
fi
echo "Testing java provider"
-${TESTJAVA}${FILESEP}bin${FILESEP}java \
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES} \
-Djava.security.krb5.realm=R \
-Djava.security.krb5.kdc=127.0.0.1 \
--- a/jdk/test/sun/security/krb5/tools/ktcheck.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/krb5/tools/ktcheck.sh Mon Dec 17 08:30:06 2012 -0500
@@ -58,7 +58,7 @@
EXTRA_OPTIONS="-Djava.security.krb5.conf=${TESTSRC}${FS}onlythree.conf"
KTAB="${TESTJAVA}${FS}bin${FS}ktab -J${EXTRA_OPTIONS} -k $KEYTAB -f"
-CHECK="${TESTJAVA}${FS}bin${FS}java ${EXTRA_OPTIONS} KtabCheck $KEYTAB"
+CHECK="${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} ${EXTRA_OPTIONS} KtabCheck $KEYTAB"
echo ${EXTRA_OPTIONS}
--- a/jdk/test/sun/security/mscapi/AccessKeyStore.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/AccessKeyStore.sh Mon Dec 17 08:30:06 2012 -0500
@@ -42,13 +42,13 @@
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\AccessKeyStore.java
echo "Using access.policy..."
- ${TESTJAVA}/bin/java \
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} \
-Djava.security.manager \
-Djava.security.policy==${TESTSRC}\\access.policy \
AccessKeyStore
echo "Using noaccess.policy..."
- ${TESTJAVA}/bin/java \
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} \
-Djava.security.manager \
-Djava.security.policy==${TESTSRC}\\noaccess.policy \
AccessKeyStore -deny
--- a/jdk/test/sun/security/mscapi/IsSunMSCAPIAvailable.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/IsSunMSCAPIAvailable.sh Mon Dec 17 08:30:06 2012 -0500
@@ -40,7 +40,7 @@
# execute test program - rely on it to exit if platform unsupported
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\IsSunMSCAPIAvailable.java
- ${TESTJAVA}/bin/java IsSunMSCAPIAvailable
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} IsSunMSCAPIAvailable
exit
;;
--- a/jdk/test/sun/security/mscapi/KeyStoreCompatibilityMode.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/KeyStoreCompatibilityMode.sh Mon Dec 17 08:30:06 2012 -0500
@@ -43,15 +43,15 @@
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\KeyStoreCompatibilityMode.java
# mode implicitly enabled
- ${TESTJAVA}/bin/java KeyStoreCompatibilityMode
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} KeyStoreCompatibilityMode
# mode explicitly enabled
- ${TESTJAVA}/bin/java \
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} \
-Dsun.security.mscapi.keyStoreCompatibilityMode="true" \
KeyStoreCompatibilityMode
# mode explicitly disabled
- ${TESTJAVA}/bin/java \
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} \
-Dsun.security.mscapi.keyStoreCompatibilityMode="false" \
KeyStoreCompatibilityMode -disable
--- a/jdk/test/sun/security/mscapi/PublicKeyInterop.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/PublicKeyInterop.sh Mon Dec 17 08:30:06 2012 -0500
@@ -62,7 +62,7 @@
echo
echo "Running the test..."
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\PublicKeyInterop.java
- ${TESTJAVA}/bin/java PublicKeyInterop
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} PublicKeyInterop
rc=$?
--- a/jdk/test/sun/security/mscapi/RSAEncryptDecrypt.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/RSAEncryptDecrypt.sh Mon Dec 17 08:30:06 2012 -0500
@@ -70,7 +70,7 @@
# unsupported
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\RSAEncryptDecrypt.java
- ${TESTJAVA}/bin/java RSAEncryptDecrypt
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} RSAEncryptDecrypt
exit
;;
--- a/jdk/test/sun/security/mscapi/ShortRSAKey1024.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/ShortRSAKey1024.sh Mon Dec 17 08:30:06 2012 -0500
@@ -89,7 +89,7 @@
echo "Running the test..."
${TESTJAVA}${FS}bin${FS}javac -d . \
${TESTSRC}${FS}ShortRSAKeyWithinTLS.java
- ${TESTJAVA}${FS}bin${FS}java ShortRSAKeyWithinTLS 7106773.$BITS $BITS \
+ ${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} ShortRSAKeyWithinTLS 7106773.$BITS $BITS \
TLSv1.2 TLS_DHE_RSA_WITH_AES_128_CBC_SHA
rc=$?
--- a/jdk/test/sun/security/mscapi/SignUsingNONEwithRSA.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/SignUsingNONEwithRSA.sh Mon Dec 17 08:30:06 2012 -0500
@@ -61,7 +61,7 @@
echo
echo "Running the test..."
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\SignUsingNONEwithRSA.java
- ${TESTJAVA}/bin/java SignUsingNONEwithRSA
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} SignUsingNONEwithRSA
rc=$?
--- a/jdk/test/sun/security/mscapi/SignUsingSHA2withRSA.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/mscapi/SignUsingSHA2withRSA.sh Mon Dec 17 08:30:06 2012 -0500
@@ -61,7 +61,7 @@
echo
echo "Running the test..."
${TESTJAVA}/bin/javac -d . ${TESTSRC}\\SignUsingSHA2withRSA.java
- ${TESTJAVA}/bin/java SignUsingSHA2withRSA
+ ${TESTJAVA}/bin/java ${TESTVMOPTS} SignUsingSHA2withRSA
rc=$?
--- a/jdk/test/sun/security/pkcs11/KeyStore/Basic.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/KeyStore/Basic.sh Mon Dec 17 08:30:06 2012 -0500
@@ -171,7 +171,7 @@
# run test
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES}${PS}${TESTSRC}${FS}loader.jar \
-DDIR=${TESTSRC}${FS}BasicData \
-DCUSTOM_DB_DIR=${TESTCLASSES} \
--- a/jdk/test/sun/security/pkcs11/KeyStore/ClientAuth.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/KeyStore/ClientAuth.sh Mon Dec 17 08:30:06 2012 -0500
@@ -128,7 +128,7 @@
# run test
echo "Run ClientAuth ..."
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES}${PS}${TESTSRC}${FS}loader.jar \
-DDIR=${TESTSRC}${FS}ClientAuthData${FS} \
-DCUSTOM_DB_DIR=${TESTCLASSES} \
@@ -149,7 +149,7 @@
# run test with specified TLS protocol and cipher suite
echo "Run ClientAuth TLSv1.2 TLS_DHE_RSA_WITH_AES_128_CBC_SHA"
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES}${PS}${TESTSRC}${FS}loader.jar \
-DDIR=${TESTSRC}${FS}ClientAuthData${FS} \
-DCUSTOM_DB_DIR=${TESTCLASSES} \
--- a/jdk/test/sun/security/pkcs11/KeyStore/SecretKeysBasic.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/KeyStore/SecretKeysBasic.sh Mon Dec 17 08:30:06 2012 -0500
@@ -141,7 +141,7 @@
# run test
cd ${TESTSRC}
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-DDIR=${TESTSRC}${FS}BasicData${FS} \
-classpath ${TESTCLASSES}${PS}${TESTSRC}${FS}loader.jar \
-DCUSTOM_DB_DIR=${TESTCLASSES} \
--- a/jdk/test/sun/security/pkcs11/KeyStore/Solaris.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/KeyStore/Solaris.sh Mon Dec 17 08:30:06 2012 -0500
@@ -142,7 +142,7 @@
# run test
cd ${TESTSRC}
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES}${PS}${TESTSRC}${FS}loader.jar \
-DDIR=${TESTSRC}${FS}BasicData${FS} \
-DCUSTOM_P11_CONFIG=${TESTSRC}${FS}BasicData${FS}p11-solaris.txt \
--- a/jdk/test/sun/security/pkcs11/Provider/ConfigQuotedString.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/Provider/ConfigQuotedString.sh Mon Dec 17 08:30:06 2012 -0500
@@ -99,7 +99,7 @@
# run test
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES} \
-DCUSTOM_P11_CONFIG=${TESTSRC}${FS}ConfigQuotedString-nss.txt \
-Dtest.src=${TESTSRC} \
--- a/jdk/test/sun/security/pkcs11/Provider/Login.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/Provider/Login.sh Mon Dec 17 08:30:06 2012 -0500
@@ -108,7 +108,7 @@
# run test
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath ${TESTCLASSES} \
-DCUSTOM_DB_DIR=${TESTCLASSES} \
-DCUSTOM_P11_CONFIG=${TESTSRC}${FS}Login-nss.txt \
--- a/jdk/test/sun/security/pkcs11/ec/TestECDSA.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/pkcs11/ec/TestECDSA.java Mon Dec 17 08:30:06 2012 -0500
@@ -179,7 +179,6 @@
// SHA1withECDSA and NONEwithECDSA
Signature s = Signature.getInstance("SHA1withECDSA", provider);
s.initSign(privateKey);
- s.initSign(privateKey);
s.update(data);
byte[] s1 = s.sign();
--- a/jdk/test/sun/security/provider/PolicyFile/GrantAllPermToExtWhenNoPolicy.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/provider/PolicyFile/GrantAllPermToExtWhenNoPolicy.sh Mon Dec 17 08:30:06 2012 -0500
@@ -82,7 +82,7 @@
${TESTJAVA}${FILESEP}jre${FILESEP}lib${FILESEP}security${FILESEP}tmp_pol
# run the test program
-${TESTJAVA}${FILESEP}bin${FILESEP}java -Djava.security.manager \
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} -Djava.security.manager \
GrantAllPermToExtWhenNoPolicy
# save error status
--- a/jdk/test/sun/security/provider/PolicyFile/getinstance/getinstance.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/provider/PolicyFile/getinstance/getinstance.sh Mon Dec 17 08:30:06 2012 -0500
@@ -92,7 +92,7 @@
${TESTJAVA}${FS}bin${FS}javac -d ${TESTCLASSES}${FS}app \
${TESTSRC}${FS}GetInstance.java
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-Xbootclasspath/a:"${TESTCLASSES}${FS}boot" \
-classpath "${TESTCLASSES}${FS}app" -Djava.security.manager \
-Djava.security.policy=GetInstance.policy \
@@ -106,7 +106,7 @@
echo "Failed on first test"
fi
-${TESTJAVA}${FS}bin${FS}java \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} \
-classpath "${TESTCLASSES}${FS}boot${PS}${TESTCLASSES}${FS}app" \
-Djava.security.manager \
-Djava.security.policy=GetInstance.policy \
--- a/jdk/test/sun/security/ssl/com/sun/net/ssl/internal/ssl/EngineArgs/DebugReportsOneExtraByte.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/ssl/com/sun/net/ssl/internal/ssl/EngineArgs/DebugReportsOneExtraByte.sh Mon Dec 17 08:30:06 2012 -0500
@@ -59,7 +59,7 @@
echo "${STRING}"
echo "========="
-${TESTJAVA}${FS}bin${FS}java -Djavax.net.debug=all \
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Djavax.net.debug=all \
-Dtest.src=${TESTSRC} \
DebugReportsOneExtraByte 2>&1 | \
grep "${STRING}"
--- a/jdk/test/sun/security/ssl/com/sun/net/ssl/internal/ssl/SSLSocketImpl/NotifyHandshakeTest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/ssl/com/sun/net/ssl/internal/ssl/SSLSocketImpl/NotifyHandshakeTest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -82,7 +82,7 @@
# This is the only thing we really care about as far as
# test status goes.
#
-${TESTJAVA}${FILESEP}bin${FILESEP}java \
+${TESTJAVA}${FILESEP}bin${FILESEP}java ${TESTVMOPTS} \
-Dtest.src=${TESTSRC} \
-classpath "com.jar${PATHSEP}edu.jar" \
-Djava.security.manager \
--- a/jdk/test/sun/security/ssl/sun/net/www/protocol/https/HttpsURLConnection/PostThruProxy.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/ssl/sun/net/www/protocol/https/HttpsURLConnection/PostThruProxy.sh Mon Dec 17 08:30:06 2012 -0500
@@ -52,5 +52,5 @@
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}OriginServer.java \
${TESTSRC}${FS}ProxyTunnelServer.java ${TESTSRC}${FS}PostThruProxy.java
-${TESTJAVA}${FS}bin${FS}java PostThruProxy ${HOSTNAME} ${TESTSRC}
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} PostThruProxy ${HOSTNAME} ${TESTSRC}
exit
--- a/jdk/test/sun/security/ssl/sun/net/www/protocol/https/HttpsURLConnection/PostThruProxyWithAuth.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/ssl/sun/net/www/protocol/https/HttpsURLConnection/PostThruProxyWithAuth.sh Mon Dec 17 08:30:06 2012 -0500
@@ -53,5 +53,5 @@
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}OriginServer.java \
${TESTSRC}${FS}ProxyTunnelServer.java \
${TESTSRC}${FS}PostThruProxyWithAuth.java
-${TESTJAVA}${FS}bin${FS}java PostThruProxyWithAuth ${HOSTNAME} ${TESTSRC}
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} PostThruProxyWithAuth ${HOSTNAME} ${TESTSRC}
exit
--- a/jdk/test/sun/security/tools/jarsigner/emptymanifest.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/tools/jarsigner/emptymanifest.sh Mon Dec 17 08:30:06 2012 -0500
@@ -65,7 +65,7 @@
}
EOF
$JAVAC CrLf.java
-$JAVA CrLf > META-INF${FS}MANIFEST.MF
+$JAVA ${TESTVMOPTS} CrLf > META-INF${FS}MANIFEST.MF
zip $JFILE META-INF${FS}MANIFEST.MF A B
$KT -alias a -dname CN=a -keyalg rsa -genkey -validity 300
--- a/jdk/test/sun/security/tools/jarsigner/ts.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/tools/jarsigner/ts.sh Mon Dec 17 08:30:06 2012 -0500
@@ -87,5 +87,5 @@
$KT -alias tsbad3 -importcert
$JAVAC -d . ${TESTSRC}/TimestampCheck.java
-$JAVA TimestampCheck
+$JAVA ${TESTVMOPTS} TimestampCheck
--- a/jdk/test/sun/security/tools/keytool/printssl.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/tools/keytool/printssl.sh Mon Dec 17 08:30:06 2012 -0500
@@ -53,7 +53,7 @@
esac
${TESTJAVA}${FS}bin${FS}javac -d . ${TESTSRC}${FS}PrintSSL.java || exit 10
-${TESTJAVA}${FS}bin${FS}java -Dtest.src=$TESTSRC PrintSSL | ( read PORT; ${TESTJAVA}${FS}bin${FS}keytool -printcert -sslserver localhost:$PORT )
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Dtest.src=$TESTSRC PrintSSL | ( read PORT; ${TESTJAVA}${FS}bin${FS}keytool -printcert -sslserver localhost:$PORT )
status=$?
rm PrintSSL*.class
--- a/jdk/test/sun/security/tools/keytool/standard.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/tools/keytool/standard.sh Mon Dec 17 08:30:06 2012 -0500
@@ -58,7 +58,7 @@
${TESTJAVA}${FS}bin${FS}javac -d . -XDignore.symbol.file ${TESTSRC}${FS}KeyToolTest.java || exit 10
-echo | ${TESTJAVA}${FS}bin${FS}java -Dfile KeyToolTest
+echo | ${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -Dfile KeyToolTest
status=$?
rm HumanInputStream*.class
--- a/jdk/test/sun/security/validator/certreplace.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/validator/certreplace.sh Mon Dec 17 08:30:06 2012 -0500
@@ -82,4 +82,4 @@
# 5. Build and run test
$JAVAC -d . ${TESTSRC}${FS}CertReplace.java
-$JAVA CertReplace certreplace.jks certreplace.certs
+$JAVA ${TESTVMOPTS} CertReplace certreplace.jks certreplace.certs
--- a/jdk/test/sun/security/validator/samedn.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/security/validator/samedn.sh Mon Dec 17 08:30:06 2012 -0500
@@ -78,5 +78,5 @@
# Check both, one of them might be dropped out of map in old codes.
$JAVAC -d . ${TESTSRC}${FS}CertReplace.java
-$JAVA CertReplace samedn.jks samedn1.certs || exit 1
-$JAVA CertReplace samedn.jks samedn2.certs || exit 2
+$JAVA ${TESTVMOPTS} CertReplace samedn.jks samedn1.certs || exit 1
+$JAVA ${TESTVMOPTS} CertReplace samedn.jks samedn2.certs || exit 2
--- a/jdk/test/sun/text/resources/LocaleData Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/text/resources/LocaleData Mon Dec 17 08:30:06 2012 -0500
@@ -7074,3 +7074,586 @@
# bug 7189611
CurrencyNames/es_VE/VEF=Bs.F.
+
+# rfe 8000983 (narrow names support)
+FormatData//DayNarrows/0=S
+FormatData//DayNarrows/1=M
+FormatData//DayNarrows/2=T
+FormatData//DayNarrows/3=W
+FormatData//DayNarrows/4=T
+FormatData//DayNarrows/5=F
+FormatData//DayNarrows/6=S
+FormatData//narrow.AmPmMarkers/0=a
+FormatData//narrow.AmPmMarkers/1=p
+FormatData//narrow.Eras/0=B
+FormatData//narrow.Eras/1=A
+FormatData//buddhist.narrow.Eras/0=BC
+FormatData//buddhist.narrow.Eras/1=B.E.
+FormatData//japanese.narrow.Eras/0=
+FormatData//japanese.narrow.Eras/1=M
+FormatData//japanese.narrow.Eras/2=T
+FormatData//japanese.narrow.Eras/3=S
+FormatData//japanese.narrow.Eras/4=H
+
+FormatData/ar/DayNarrows/0=\u062d
+FormatData/ar/DayNarrows/1=\u0646
+FormatData/ar/DayNarrows/2=\u062b
+FormatData/ar/DayNarrows/3=\u0631
+FormatData/ar/DayNarrows/4=\u062e
+FormatData/ar/DayNarrows/5=\u062c
+FormatData/ar/DayNarrows/6=\u0633
+
+FormatData/be/standalone.MonthNarrows/0=\u0441
+FormatData/be/standalone.MonthNarrows/1=\u043b
+FormatData/be/standalone.MonthNarrows/2=\u0441
+FormatData/be/standalone.MonthNarrows/3=\u043a
+FormatData/be/standalone.MonthNarrows/4=\u043c
+FormatData/be/standalone.MonthNarrows/5=\u0447
+FormatData/be/standalone.MonthNarrows/6=\u043b
+FormatData/be/standalone.MonthNarrows/7=\u0436
+FormatData/be/standalone.MonthNarrows/8=\u0432
+FormatData/be/standalone.MonthNarrows/9=\u043a
+FormatData/be/standalone.MonthNarrows/10=\u043b
+FormatData/be/standalone.MonthNarrows/11=\u0441
+FormatData/be/standalone.MonthNarrows/12=
+FormatData/be/DayNarrows/0=\u043d
+FormatData/be/DayNarrows/1=\u043f
+FormatData/be/DayNarrows/2=\u0430
+FormatData/be/DayNarrows/3=\u0441
+FormatData/be/DayNarrows/4=\u0447
+FormatData/be/DayNarrows/5=\u043f
+FormatData/be/DayNarrows/6=\u0441
+
+FormatData/bg/DayNarrows/0=\u043d
+FormatData/bg/DayNarrows/1=\u043f
+FormatData/bg/DayNarrows/2=\u0432
+FormatData/bg/DayNarrows/3=\u0441
+FormatData/bg/DayNarrows/4=\u0447
+FormatData/bg/DayNarrows/5=\u043f
+FormatData/bg/DayNarrows/6=\u0441
+
+FormatData/ca/standalone.MonthNarrows/0=g
+FormatData/ca/standalone.MonthNarrows/1=f
+FormatData/ca/standalone.MonthNarrows/2=m
+FormatData/ca/standalone.MonthNarrows/3=a
+FormatData/ca/standalone.MonthNarrows/4=m
+FormatData/ca/standalone.MonthNarrows/5=j
+FormatData/ca/standalone.MonthNarrows/6=j
+FormatData/ca/standalone.MonthNarrows/7=a
+FormatData/ca/standalone.MonthNarrows/8=s
+FormatData/ca/standalone.MonthNarrows/9=o
+FormatData/ca/standalone.MonthNarrows/10=n
+FormatData/ca/standalone.MonthNarrows/11=d
+FormatData/ca/standalone.MonthNarrows/12=
+FormatData/ca/DayNarrows/0=G
+# Note: "L" is a contribued item in CLDR
+FormatData/ca/DayNarrows/1=L
+FormatData/ca/DayNarrows/2=T
+FormatData/ca/DayNarrows/3=C
+FormatData/ca/DayNarrows/4=J
+FormatData/ca/DayNarrows/5=V
+FormatData/ca/DayNarrows/6=S
+FormatData/ca/standalone.DayNarrows/0=g
+FormatData/ca/standalone.DayNarrows/1=l
+FormatData/ca/standalone.DayNarrows/2=t
+FormatData/ca/standalone.DayNarrows/3=c
+FormatData/ca/standalone.DayNarrows/4=j
+FormatData/ca/standalone.DayNarrows/5=v
+FormatData/ca/standalone.DayNarrows/6=s
+
+FormatData/cs/DayNarrows/0=N
+FormatData/cs/DayNarrows/1=P
+FormatData/cs/DayNarrows/2=\u00da
+FormatData/cs/DayNarrows/3=S
+FormatData/cs/DayNarrows/4=\u010c
+FormatData/cs/DayNarrows/5=P
+FormatData/cs/DayNarrows/6=S
+
+FormatData/da/DayNarrows/0=S
+FormatData/da/DayNarrows/1=M
+FormatData/da/DayNarrows/2=T
+FormatData/da/DayNarrows/3=O
+FormatData/da/DayNarrows/4=T
+FormatData/da/DayNarrows/5=F
+FormatData/da/DayNarrows/6=L
+
+FormatData/de/DayNarrows/0=S
+FormatData/de/DayNarrows/1=M
+FormatData/de/DayNarrows/2=D
+FormatData/de/DayNarrows/3=M
+FormatData/de/DayNarrows/4=D
+FormatData/de/DayNarrows/5=F
+FormatData/de/DayNarrows/6=S
+
+FormatData/el/DayNarrows/0=\u039a
+FormatData/el/DayNarrows/1=\u0394
+FormatData/el/DayNarrows/2=\u03a4
+FormatData/el/DayNarrows/3=\u03a4
+FormatData/el/DayNarrows/4=\u03a0
+FormatData/el/DayNarrows/5=\u03a0
+FormatData/el/DayNarrows/6=\u03a3
+
+FormatData/es/DayNarrows/0=D
+FormatData/es/DayNarrows/1=L
+FormatData/es/DayNarrows/2=M
+FormatData/es/DayNarrows/3=X
+FormatData/es/DayNarrows/4=J
+FormatData/es/DayNarrows/5=V
+FormatData/es/DayNarrows/6=S
+
+FormatData/et/DayNarrows/0=P
+FormatData/et/DayNarrows/1=E
+FormatData/et/DayNarrows/2=T
+FormatData/et/DayNarrows/3=K
+FormatData/et/DayNarrows/4=N
+FormatData/et/DayNarrows/5=R
+FormatData/et/DayNarrows/6=L
+
+FormatData/fi/standalone.MonthNarrows/0=T
+FormatData/fi/standalone.MonthNarrows/1=H
+FormatData/fi/standalone.MonthNarrows/2=M
+FormatData/fi/standalone.MonthNarrows/3=H
+FormatData/fi/standalone.MonthNarrows/4=T
+FormatData/fi/standalone.MonthNarrows/5=K
+FormatData/fi/standalone.MonthNarrows/6=H
+FormatData/fi/standalone.MonthNarrows/7=E
+FormatData/fi/standalone.MonthNarrows/8=S
+FormatData/fi/standalone.MonthNarrows/9=L
+FormatData/fi/standalone.MonthNarrows/10=M
+FormatData/fi/standalone.MonthNarrows/11=J
+FormatData/fi/standalone.MonthNarrows/12=
+FormatData/fi/DayNarrows/0=S
+FormatData/fi/DayNarrows/1=M
+FormatData/fi/DayNarrows/2=T
+FormatData/fi/DayNarrows/3=K
+FormatData/fi/DayNarrows/4=T
+FormatData/fi/DayNarrows/5=P
+FormatData/fi/DayNarrows/6=L
+FormatData/fi/standalone.DayNarrows/0=S
+FormatData/fi/standalone.DayNarrows/1=M
+FormatData/fi/standalone.DayNarrows/2=T
+FormatData/fi/standalone.DayNarrows/3=K
+FormatData/fi/standalone.DayNarrows/4=T
+FormatData/fi/standalone.DayNarrows/5=P
+FormatData/fi/standalone.DayNarrows/6=L
+FormatData/fi/narrow.AmPmMarkers/0=ap.
+FormatData/fi/narrow.AmPmMarkers/1=ip.
+
+FormatData/fr/DayNarrows/0=D
+FormatData/fr/DayNarrows/1=L
+FormatData/fr/DayNarrows/2=M
+FormatData/fr/DayNarrows/3=M
+FormatData/fr/DayNarrows/4=J
+FormatData/fr/DayNarrows/5=V
+FormatData/fr/DayNarrows/6=S
+
+FormatData/hi_IN/DayNarrows/0=\u0930
+FormatData/hi_IN/DayNarrows/1=\u0938\u094b
+FormatData/hi_IN/DayNarrows/2=\u092e\u0902
+FormatData/hi_IN/DayNarrows/3=\u092c\u0941
+FormatData/hi_IN/DayNarrows/4=\u0917\u0941
+FormatData/hi_IN/DayNarrows/5=\u0936\u0941
+FormatData/hi_IN/DayNarrows/6=\u0936
+
+FormatData/hr/standalone.MonthNarrows/0=1.
+FormatData/hr/standalone.MonthNarrows/1=2.
+FormatData/hr/standalone.MonthNarrows/2=3.
+FormatData/hr/standalone.MonthNarrows/3=4.
+FormatData/hr/standalone.MonthNarrows/4=5.
+FormatData/hr/standalone.MonthNarrows/5=6.
+FormatData/hr/standalone.MonthNarrows/6=7.
+FormatData/hr/standalone.MonthNarrows/7=8.
+FormatData/hr/standalone.MonthNarrows/8=9.
+FormatData/hr/standalone.MonthNarrows/9=10.
+FormatData/hr/standalone.MonthNarrows/10=11.
+FormatData/hr/standalone.MonthNarrows/11=12.
+FormatData/hr/standalone.MonthNarrows/12=
+FormatData/hr/DayNarrows/0=N
+FormatData/hr/DayNarrows/1=P
+FormatData/hr/DayNarrows/2=U
+FormatData/hr/DayNarrows/3=S
+FormatData/hr/DayNarrows/4=\u010c
+FormatData/hr/DayNarrows/5=P
+FormatData/hr/DayNarrows/6=S
+FormatData/hr/standalone.DayNarrows/0=n
+FormatData/hr/standalone.DayNarrows/1=p
+FormatData/hr/standalone.DayNarrows/2=u
+FormatData/hr/standalone.DayNarrows/3=s
+FormatData/hr/standalone.DayNarrows/4=\u010d
+FormatData/hr/standalone.DayNarrows/5=p
+FormatData/hr/standalone.DayNarrows/6=s
+
+FormatData/hu/DayNarrows/0=V
+FormatData/hu/DayNarrows/1=H
+FormatData/hu/DayNarrows/2=K
+FormatData/hu/DayNarrows/3=Sz
+FormatData/hu/DayNarrows/4=Cs
+FormatData/hu/DayNarrows/5=P
+FormatData/hu/DayNarrows/6=Sz
+
+FormatData/is/standalone.MonthNarrows/0=j
+FormatData/is/standalone.MonthNarrows/1=f
+FormatData/is/standalone.MonthNarrows/2=m
+FormatData/is/standalone.MonthNarrows/3=a
+FormatData/is/standalone.MonthNarrows/4=m
+FormatData/is/standalone.MonthNarrows/5=j
+FormatData/is/standalone.MonthNarrows/6=j
+FormatData/is/standalone.MonthNarrows/7=\u00e1
+FormatData/is/standalone.MonthNarrows/8=s
+FormatData/is/standalone.MonthNarrows/9=o
+FormatData/is/standalone.MonthNarrows/10=n
+FormatData/is/standalone.MonthNarrows/11=d
+FormatData/is/standalone.MonthNarrows/12=
+FormatData/is/DayNarrows/0=S
+FormatData/is/DayNarrows/1=M
+FormatData/is/DayNarrows/2=\u00de
+FormatData/is/DayNarrows/3=M
+FormatData/is/DayNarrows/4=F
+FormatData/is/DayNarrows/5=F
+FormatData/is/DayNarrows/6=L
+FormatData/is/standalone.DayNarrows/0=s
+FormatData/is/standalone.DayNarrows/1=m
+FormatData/is/standalone.DayNarrows/2=\u00fe
+FormatData/is/standalone.DayNarrows/3=m
+FormatData/is/standalone.DayNarrows/4=f
+FormatData/is/standalone.DayNarrows/5=f
+FormatData/is/standalone.DayNarrows/6=l
+
+FormatData/it/DayNarrows/0=D
+FormatData/it/DayNarrows/1=L
+FormatData/it/DayNarrows/2=M
+FormatData/it/DayNarrows/3=M
+FormatData/it/DayNarrows/4=G
+FormatData/it/DayNarrows/5=V
+FormatData/it/DayNarrows/6=S
+
+FormatData/iw/DayNarrows/0=\u05d0
+FormatData/iw/DayNarrows/1=\u05d1
+FormatData/iw/DayNarrows/2=\u05d2
+FormatData/iw/DayNarrows/3=\u05d3
+FormatData/iw/DayNarrows/4=\u05d4
+FormatData/iw/DayNarrows/5=\u05d5
+FormatData/iw/DayNarrows/6=\u05e9
+FormatData/iw/standalone.DayNarrows/0=\u05d0
+FormatData/iw/standalone.DayNarrows/1=\u05d1
+FormatData/iw/standalone.DayNarrows/2=\u05d2
+FormatData/iw/standalone.DayNarrows/3=\u05d3
+FormatData/iw/standalone.DayNarrows/4=\u05d4
+FormatData/iw/standalone.DayNarrows/5=\u05d5
+FormatData/iw/standalone.DayNarrows/6=\u05e9
+
+FormatData/ja/DayNarrows/0=\u65e5
+FormatData/ja/DayNarrows/1=\u6708
+FormatData/ja/DayNarrows/2=\u706b
+FormatData/ja/DayNarrows/3=\u6c34
+FormatData/ja/DayNarrows/4=\u6728
+FormatData/ja/DayNarrows/5=\u91d1
+FormatData/ja/DayNarrows/6=\u571f
+
+FormatData/ko/DayNarrows/0=\uc77c
+FormatData/ko/DayNarrows/1=\uc6d4
+FormatData/ko/DayNarrows/2=\ud654
+FormatData/ko/DayNarrows/3=\uc218
+FormatData/ko/DayNarrows/4=\ubaa9
+FormatData/ko/DayNarrows/5=\uae08
+FormatData/ko/DayNarrows/6=\ud1a0
+
+FormatData/lt/standalone.MonthNarrows/0=S
+FormatData/lt/standalone.MonthNarrows/1=V
+FormatData/lt/standalone.MonthNarrows/2=K
+FormatData/lt/standalone.MonthNarrows/3=B
+FormatData/lt/standalone.MonthNarrows/4=G
+FormatData/lt/standalone.MonthNarrows/5=B
+FormatData/lt/standalone.MonthNarrows/6=L
+FormatData/lt/standalone.MonthNarrows/7=R
+FormatData/lt/standalone.MonthNarrows/8=R
+FormatData/lt/standalone.MonthNarrows/9=S
+FormatData/lt/standalone.MonthNarrows/10=L
+FormatData/lt/standalone.MonthNarrows/11=G
+FormatData/lt/standalone.MonthNarrows/12=
+
+FormatData/lt/DayNarrows/0=S
+FormatData/lt/DayNarrows/1=P
+FormatData/lt/DayNarrows/2=A
+FormatData/lt/DayNarrows/3=T
+FormatData/lt/DayNarrows/4=K
+FormatData/lt/DayNarrows/5=P
+FormatData/lt/DayNarrows/6=\u0160
+FormatData/lt/standalone.DayNarrows/0=S
+FormatData/lt/standalone.DayNarrows/1=P
+FormatData/lt/standalone.DayNarrows/2=A
+FormatData/lt/standalone.DayNarrows/3=T
+FormatData/lt/standalone.DayNarrows/4=K
+FormatData/lt/standalone.DayNarrows/5=P
+FormatData/lt/standalone.DayNarrows/6=\u0160
+
+FormatData/lv/DayNarrows/0=S
+FormatData/lv/DayNarrows/1=P
+FormatData/lv/DayNarrows/2=O
+FormatData/lv/DayNarrows/3=T
+FormatData/lv/DayNarrows/4=C
+FormatData/lv/DayNarrows/5=P
+FormatData/lv/DayNarrows/6=S
+
+FormatData/mk/DayNarrows/0=\u043d
+FormatData/mk/DayNarrows/1=\u043f
+FormatData/mk/DayNarrows/2=\u0432
+FormatData/mk/DayNarrows/3=\u0441
+FormatData/mk/DayNarrows/4=\u0447
+FormatData/mk/DayNarrows/5=\u043f
+FormatData/mk/DayNarrows/6=\u0441
+
+FormatData/ms/standalone.MonthNarrows/0=J
+FormatData/ms/standalone.MonthNarrows/1=F
+FormatData/ms/standalone.MonthNarrows/2=M
+FormatData/ms/standalone.MonthNarrows/3=A
+FormatData/ms/standalone.MonthNarrows/4=M
+FormatData/ms/standalone.MonthNarrows/5=J
+FormatData/ms/standalone.MonthNarrows/6=J
+FormatData/ms/standalone.MonthNarrows/7=O
+FormatData/ms/standalone.MonthNarrows/8=S
+FormatData/ms/standalone.MonthNarrows/9=O
+FormatData/ms/standalone.MonthNarrows/10=N
+FormatData/ms/standalone.MonthNarrows/11=D
+FormatData/ms/standalone.MonthNarrows/12=
+FormatData/ms/DayNarrows/0=A
+FormatData/ms/DayNarrows/1=I
+FormatData/ms/DayNarrows/2=S
+FormatData/ms/DayNarrows/3=R
+FormatData/ms/DayNarrows/4=K
+FormatData/ms/DayNarrows/5=J
+FormatData/ms/DayNarrows/6=S
+FormatData/ms/standalone.DayNarrows/0=A
+FormatData/ms/standalone.DayNarrows/1=I
+FormatData/ms/standalone.DayNarrows/2=S
+FormatData/ms/standalone.DayNarrows/3=R
+FormatData/ms/standalone.DayNarrows/4=K
+FormatData/ms/standalone.DayNarrows/5=J
+FormatData/ms/standalone.DayNarrows/6=S
+
+FormatData/mt/DayNarrows/0=\u0126
+FormatData/mt/DayNarrows/1=T
+FormatData/mt/DayNarrows/2=T
+FormatData/mt/DayNarrows/3=E
+FormatData/mt/DayNarrows/4=\u0126
+FormatData/mt/DayNarrows/5=\u0120
+FormatData/mt/DayNarrows/6=S
+
+FormatData/nl/DayNarrows/0=Z
+FormatData/nl/DayNarrows/1=M
+FormatData/nl/DayNarrows/2=D
+FormatData/nl/DayNarrows/3=W
+FormatData/nl/DayNarrows/4=D
+FormatData/nl/DayNarrows/5=V
+FormatData/nl/DayNarrows/6=Z
+
+FormatData/pl/DayNarrows/0=N
+FormatData/pl/DayNarrows/1=P
+FormatData/pl/DayNarrows/2=W
+FormatData/pl/DayNarrows/3=\u015a
+FormatData/pl/DayNarrows/4=C
+FormatData/pl/DayNarrows/5=P
+FormatData/pl/DayNarrows/6=S
+
+FormatData/pt/DayNarrows/0=D
+FormatData/pt/DayNarrows/1=S
+FormatData/pt/DayNarrows/2=T
+FormatData/pt/DayNarrows/3=Q
+FormatData/pt/DayNarrows/4=Q
+FormatData/pt/DayNarrows/5=S
+FormatData/pt/DayNarrows/6=S
+
+FormatData/ro/standalone.MonthNarrows/0=I
+FormatData/ro/standalone.MonthNarrows/1=F
+FormatData/ro/standalone.MonthNarrows/2=M
+FormatData/ro/standalone.MonthNarrows/3=A
+FormatData/ro/standalone.MonthNarrows/4=M
+FormatData/ro/standalone.MonthNarrows/5=I
+FormatData/ro/standalone.MonthNarrows/6=I
+FormatData/ro/standalone.MonthNarrows/7=A
+FormatData/ro/standalone.MonthNarrows/8=S
+FormatData/ro/standalone.MonthNarrows/9=O
+FormatData/ro/standalone.MonthNarrows/10=N
+FormatData/ro/standalone.MonthNarrows/11=D
+FormatData/ro/standalone.MonthNarrows/12=
+# DayNarrows are commented out because most of the entries are undefined
+#FormatData/ro/DayNarrows/0=D
+#FormatData/ro/DayNarrows/1=
+#FormatData/ro/DayNarrows/2=
+#FormatData/ro/DayNarrows/3=
+#FormatData/ro/DayNarrows/4=
+#FormatData/ro/DayNarrows/5=
+#FormatData/ro/DayNarrows/6=
+FormatData/ro/standalone.DayNarrows/0=D
+FormatData/ro/standalone.DayNarrows/1=L
+FormatData/ro/standalone.DayNarrows/2=M
+FormatData/ro/standalone.DayNarrows/3=M
+FormatData/ro/standalone.DayNarrows/4=J
+FormatData/ro/standalone.DayNarrows/5=V
+FormatData/ro/standalone.DayNarrows/6=S
+
+FormatData/ru/DayNarrows/0=\u0412
+FormatData/ru/DayNarrows/1=\u041f\u043d
+FormatData/ru/DayNarrows/2=\u0412\u0442
+FormatData/ru/DayNarrows/3=\u0421
+FormatData/ru/DayNarrows/4=\u0427
+FormatData/ru/DayNarrows/5=\u041f
+# Note: "sat" is an contributed item in CLDR.
+FormatData/ru/DayNarrows/6=\u0421
+
+FormatData/ru/standalone.DayNarrows/0=\u0412
+FormatData/ru/standalone.DayNarrows/1=\u041f
+FormatData/ru/standalone.DayNarrows/2=\u0412
+FormatData/ru/standalone.DayNarrows/3=\u0421
+FormatData/ru/standalone.DayNarrows/4=\u0427
+FormatData/ru/standalone.DayNarrows/5=\u041f
+FormatData/ru/standalone.DayNarrows/6=\u0421
+
+FormatData/sk/DayNarrows/0=N
+FormatData/sk/DayNarrows/1=P
+FormatData/sk/DayNarrows/2=U
+FormatData/sk/DayNarrows/3=S
+FormatData/sk/DayNarrows/4=\u0160
+FormatData/sk/DayNarrows/5=P
+FormatData/sk/DayNarrows/6=S
+
+FormatData/sl/DayNarrows/0=n
+FormatData/sl/DayNarrows/1=p
+FormatData/sl/DayNarrows/2=t
+FormatData/sl/DayNarrows/3=s
+FormatData/sl/DayNarrows/4=\u010d
+FormatData/sl/DayNarrows/5=p
+FormatData/sl/DayNarrows/6=s
+
+FormatData/sq/DayNarrows/0=D
+FormatData/sq/DayNarrows/1=H
+FormatData/sq/DayNarrows/2=M
+FormatData/sq/DayNarrows/3=M
+FormatData/sq/DayNarrows/4=E
+FormatData/sq/DayNarrows/5=P
+FormatData/sq/DayNarrows/6=S
+
+FormatData/sr/DayNarrows/0=\u043d
+FormatData/sr/DayNarrows/1=\u043f
+FormatData/sr/DayNarrows/2=\u0443
+FormatData/sr/DayNarrows/3=\u0441
+FormatData/sr/DayNarrows/4=\u0447
+FormatData/sr/DayNarrows/5=\u043f
+FormatData/sr/DayNarrows/6=\u0441
+FormatData/sr/short.Eras/0=\u043f. \u043d. \u0435.
+FormatData/sr/short.Eras/1=\u043d. \u0435.
+FormatData/sr/narrow.Eras/0=\u043f.\u043d.\u0435.
+FormatData/sr/narrow.Eras/1=\u043d.\u0435.
+
+FormatData/sv/standalone.MonthNarrows/0=J
+FormatData/sv/standalone.MonthNarrows/1=F
+FormatData/sv/standalone.MonthNarrows/2=M
+FormatData/sv/standalone.MonthNarrows/3=A
+FormatData/sv/standalone.MonthNarrows/4=M
+FormatData/sv/standalone.MonthNarrows/5=J
+FormatData/sv/standalone.MonthNarrows/6=J
+FormatData/sv/standalone.MonthNarrows/7=A
+FormatData/sv/standalone.MonthNarrows/8=S
+FormatData/sv/standalone.MonthNarrows/9=O
+FormatData/sv/standalone.MonthNarrows/10=N
+FormatData/sv/standalone.MonthNarrows/11=D
+FormatData/sv/standalone.MonthNarrows/12=
+FormatData/sv/DayNarrows/0=S
+FormatData/sv/DayNarrows/1=M
+FormatData/sv/DayNarrows/2=T
+FormatData/sv/DayNarrows/3=O
+FormatData/sv/DayNarrows/4=T
+FormatData/sv/DayNarrows/5=F
+FormatData/sv/DayNarrows/6=L
+FormatData/sv/standalone.DayNarrows/0=S
+FormatData/sv/standalone.DayNarrows/1=M
+FormatData/sv/standalone.DayNarrows/2=T
+FormatData/sv/standalone.DayNarrows/3=O
+FormatData/sv/standalone.DayNarrows/4=T
+FormatData/sv/standalone.DayNarrows/5=F
+FormatData/sv/standalone.DayNarrows/6=L
+FormatData/sv/narrow.Eras/0=f.Kr.
+FormatData/sv/narrow.Eras/1=e.Kr.
+FormatData/sv/narrow.AmPmMarkers/0=f
+FormatData/sv/narrow.AmPmMarkers/1=e
+
+FormatData/th/standalone.MonthNarrows/0=\u0e21.\u0e04.
+FormatData/th/standalone.MonthNarrows/1=\u0e01.\u0e1e.
+FormatData/th/standalone.MonthNarrows/2=\u0e21\u0e35.\u0e04.
+FormatData/th/standalone.MonthNarrows/3=\u0e40\u0e21.\u0e22.
+FormatData/th/standalone.MonthNarrows/4=\u0e1e.\u0e04.
+FormatData/th/standalone.MonthNarrows/5=\u0e21\u0e34.\u0e22.
+FormatData/th/standalone.MonthNarrows/6=\u0e01.\u0e04.
+FormatData/th/standalone.MonthNarrows/7=\u0e2a.\u0e04.
+FormatData/th/standalone.MonthNarrows/8=\u0e01.\u0e22.
+FormatData/th/standalone.MonthNarrows/9=\u0e15.\u0e04.
+FormatData/th/standalone.MonthNarrows/10=\u0e1e.\u0e22.
+FormatData/th/standalone.MonthNarrows/11=\u0e18.\u0e04.
+FormatData/th/standalone.MonthNarrows/12=
+FormatData/th/DayNarrows/0=\u0e2d
+FormatData/th/DayNarrows/1=\u0e08
+FormatData/th/DayNarrows/2=\u0e2d
+FormatData/th/DayNarrows/3=\u0e1e
+FormatData/th/DayNarrows/4=\u0e1e
+FormatData/th/DayNarrows/5=\u0e28
+FormatData/th/DayNarrows/6=\u0e2a
+FormatData/th/narrow.Eras/0=\u0e01\u0e48\u0e2d\u0e19 \u0e04.\u0e28.
+FormatData/th/narrow.Eras/1=\u0e04.\u0e28.
+
+FormatData/tr/standalone.MonthNarrows/0=O
+FormatData/tr/standalone.MonthNarrows/1=\u015e
+FormatData/tr/standalone.MonthNarrows/2=M
+FormatData/tr/standalone.MonthNarrows/3=N
+FormatData/tr/standalone.MonthNarrows/4=M
+FormatData/tr/standalone.MonthNarrows/5=H
+FormatData/tr/standalone.MonthNarrows/6=T
+FormatData/tr/standalone.MonthNarrows/7=A
+FormatData/tr/standalone.MonthNarrows/8=E
+FormatData/tr/standalone.MonthNarrows/9=E
+FormatData/tr/standalone.MonthNarrows/10=K
+FormatData/tr/standalone.MonthNarrows/11=A
+FormatData/tr/standalone.MonthNarrows/12=
+FormatData/tr/DayNarrows/0=P
+FormatData/tr/DayNarrows/1=P
+FormatData/tr/DayNarrows/2=S
+FormatData/tr/DayNarrows/3=\u00c7
+FormatData/tr/DayNarrows/4=P
+FormatData/tr/DayNarrows/5=C
+FormatData/tr/DayNarrows/6=C
+
+FormatData/uk/DayNarrows/0=\u041d
+FormatData/uk/DayNarrows/1=\u041f
+FormatData/uk/DayNarrows/2=\u0412
+FormatData/uk/DayNarrows/3=\u0421
+FormatData/uk/DayNarrows/4=\u0427
+FormatData/uk/DayNarrows/5=\u041f
+FormatData/uk/DayNarrows/6=\u0421
+
+FormatData/vi/DayNarrows/0=CN
+FormatData/vi/DayNarrows/1=T2
+FormatData/vi/DayNarrows/2=T3
+FormatData/vi/DayNarrows/3=T4
+FormatData/vi/DayNarrows/4=T5
+FormatData/vi/DayNarrows/5=T6
+FormatData/vi/DayNarrows/6=T7
+
+FormatData/zh/standalone.MonthNarrows/0=1\u6708
+FormatData/zh/standalone.MonthNarrows/1=2\u6708
+FormatData/zh/standalone.MonthNarrows/2=3\u6708
+FormatData/zh/standalone.MonthNarrows/3=4\u6708
+FormatData/zh/standalone.MonthNarrows/4=5\u6708
+FormatData/zh/standalone.MonthNarrows/5=6\u6708
+FormatData/zh/standalone.MonthNarrows/6=7\u6708
+FormatData/zh/standalone.MonthNarrows/7=8\u6708
+FormatData/zh/standalone.MonthNarrows/8=9\u6708
+FormatData/zh/standalone.MonthNarrows/9=10\u6708
+FormatData/zh/standalone.MonthNarrows/10=11\u6708
+FormatData/zh/standalone.MonthNarrows/11=12\u6708
+FormatData/zh/standalone.MonthNarrows/12=
+FormatData/zh/DayNarrows/0=\u65e5
+FormatData/zh/DayNarrows/1=\u4e00
+FormatData/zh/DayNarrows/2=\u4e8c
+FormatData/zh/DayNarrows/3=\u4e09
+FormatData/zh/DayNarrows/4=\u56db
+FormatData/zh/DayNarrows/5=\u4e94
+FormatData/zh/DayNarrows/6=\u516d
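
For reference, a minimal sketch of how the narrow names above can be observed through java.util.Calendar.getDisplayName with the NARROW_FORMAT and NARROW_STANDALONE styles added for rfe 8000983; the exact output assumes a JDK 8 runtime using the JRE locale provider:

    import java.util.Calendar;
    import java.util.Locale;

    public class NarrowNamesSketch {
        public static void main(String[] args) {
            // Monday in German: expected "M" per FormatData/de/DayNarrows/1 above.
            Calendar de = Calendar.getInstance(Locale.GERMAN);
            de.set(2012, Calendar.DECEMBER, 17);                 // a Monday
            System.out.println(de.getDisplayName(
                    Calendar.DAY_OF_WEEK, Calendar.NARROW_FORMAT, Locale.GERMAN));

            // Stand-alone narrow month name in Swedish: expected "J" per
            // FormatData/sv/standalone.MonthNarrows/0 above.
            Calendar sv = Calendar.getInstance(new Locale("sv"));
            sv.set(Calendar.MONTH, Calendar.JANUARY);
            System.out.println(sv.getDisplayName(
                    Calendar.MONTH, Calendar.NARROW_STANDALONE, new Locale("sv")));
        }
    }
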
--- a/jdk/test/sun/text/resources/LocaleDataTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/text/resources/LocaleDataTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -34,7 +34,7 @@
* 6509039 6609737 6610748 6645271 6507067 6873931 6450945 6645268 6646611
* 6645405 6650730 6910489 6573250 6870908 6585666 6716626 6914413 6916787
* 6919624 6998391 7019267 7020960 7025837 7020583 7036905 7066203 7101495
- * 7003124 7085757 7028073 7171028 7189611
+ * 7003124 7085757 7028073 7171028 7189611 8000983
* @summary Verify locale data
*
*/
--- a/jdk/test/sun/tools/jrunscript/common.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/sun/tools/jrunscript/common.sh Mon Dec 17 08:30:06 2012 -0500
@@ -63,8 +63,4 @@
JRUNSCRIPT="${TESTJAVA}/bin/jrunscript"
JAVAC="${TESTJAVA}/bin/javac"
JAVA="${TESTJAVA}/bin/java"
- # needed to get full headless behavior on Mac
- if [ "$OS" = "Darwin" ]; then
- export AWT_TOOLKIT=XToolkit
- fi
}
--- a/jdk/test/tools/launcher/6842838/Test6842838.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/tools/launcher/6842838/Test6842838.sh Mon Dec 17 08:30:06 2012 -0500
@@ -68,8 +68,8 @@
fi
BADFILE=newbadjar.jar
-${JAVA_EXE} -version
-${JAVA_EXE} -cp ${TESTCLASSES} CreateBadJar ${BADFILE} "META-INF/MANIFEST.MF"
+${JAVA_EXE} ${TESTVMOPTS} -version
+${JAVA_EXE} ${TESTVMOPTS} -cp ${TESTCLASSES} CreateBadJar ${BADFILE} "META-INF/MANIFEST.MF"
LD_PRELOAD=${LIBUMEM} ${JAVA_EXE} -jar ${BADFILE} > test.out 2>&1
grep "Invalid or corrupt jarfile" test.out
--- a/jdk/test/tools/launcher/Arrrghs.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/tools/launcher/Arrrghs.java Mon Dec 17 08:30:06 2012 -0500
@@ -27,7 +27,7 @@
* 6894719 6968053 7151434 7146424
* @summary Argument parsing validation.
* @compile -XDignore.symbol.file Arrrghs.java
- * @run main Arrrghs
+ * @run main/othervm Arrrghs
*/
import java.io.BufferedReader;
@@ -204,8 +204,7 @@
// exiting the process prematurely can terminate the stderr.
scratchpad.add(javaCmd + " -version " + inArgs);
File batFile = new File("atest.bat");
- java.nio.file.Files.deleteIfExists(batFile.toPath());
- createFile(batFile, scratchpad);
+ createAFile(batFile, scratchpad);
TestResult tr = doExec(batFile.getName());
--- a/jdk/test/tools/launcher/MultipleJRE.sh Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/tools/launcher/MultipleJRE.sh Mon Dec 17 08:30:06 2012 -0500
@@ -49,8 +49,8 @@
exit 1
fi
-JAVAEXE="$TESTJAVA/bin/java"
-JAVA="$TESTJAVA/bin/java -classpath $TESTCLASSES"
+JAVAEXE="$TESTJAVA/bin/java ${TESTVMOPTS}"
+JAVA="$TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES"
JAR="$TESTJAVA/bin/jar"
OS=`uname -s`;
--- a/jdk/test/tools/launcher/TestHelper.java Mon Dec 17 08:28:27 2012 -0500
+++ b/jdk/test/tools/launcher/TestHelper.java Mon Dec 17 08:30:06 2012 -0500
@@ -358,6 +358,51 @@
Files.copy(src.toPath(), dst.toPath(), COPY_ATTRIBUTES, REPLACE_EXISTING);
}
+ /**
+ * Attempt to create a file at the given location. If an IOException
+ * occurs then back off for a moment and try again. When a number of
+ * attempts fail, give up and throw an exception.
+ */
+ void createAFile(File aFile, List<String> contents) throws IOException {
+ IOException cause = null;
+ for (int attempts = 0; attempts < 10; attempts++) {
+ try {
+ Files.write(aFile.getAbsoluteFile().toPath(), contents,
+ Charset.defaultCharset(), CREATE, TRUNCATE_EXISTING, WRITE);
+ if (cause != null) {
+ /*
+ * report attempts and errors that were encountered
+ * for diagnostic purposes
+ */
+ System.err.println("Created batch file " +
+ aFile + " in " + (attempts + 1) +
+ " attempts");
+ System.err.println("Errors encountered: " + cause);
+ cause.printStackTrace();
+ }
+ return;
+ } catch (IOException ioe) {
+ if (cause != null) {
+ // chain the exceptions so they all get reported for diagnostics
+ cause.addSuppressed(ioe);
+ } else {
+ cause = ioe;
+ }
+ }
+
+ try {
+ Thread.sleep(500);
+ } catch (InterruptedException ie) {
+ if (cause != null) {
+                    // cause should always be non-null here
+ ie.addSuppressed(cause);
+ }
+ throw new RuntimeException("Interrupted while creating batch file", ie);
+ }
+ }
+ throw new RuntimeException("Unable to create batch file", cause);
+ }
+
static void createFile(File outFile, List<String> content) throws IOException {
Files.write(outFile.getAbsoluteFile().toPath(), content,
Charset.defaultCharset(), CREATE_NEW);
--- a/langtools/.hgtags Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/.hgtags Mon Dec 17 08:30:06 2012 -0500
@@ -187,3 +187,5 @@
92e6f2190ca0567c857f85c3fb7a2be5adf079d0 jdk8-b63
e6ee43b3e2473798b17a556e9f11eebe25ab81d4 jdk8-b64
5f2faba89cac665e365c05074064ffc934a495eb jdk8-b65
+20230f8b0eef92a57043735fc2ca00fea7e510a0 jdk8-b66
+303b09787a69136cd2019f9edfed3f308572e9fc jdk8-b67
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/com/sun/source/tree/IntersectionTypeTree.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.sun.source.tree;
+
+import java.util.List;
+
+/**
+ * A tree node for an intersection type in a cast expression.
+ *
+ * @author Maurizio Cimadamore
+ *
+ * @since 1.8
+ */
+public interface IntersectionTypeTree extends Tree {
+ List<? extends Tree> getBounds();
+}
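
For reference, a minimal sketch of the source construct this tree node models: an intersection type used as the target of a cast, accepted from source level 8 (see allowIntersectionTypesInCast below):

    import java.io.Serializable;

    public class IntersectionCastSketch {
        public static void main(String[] args) {
            // The cast target below is represented by an IntersectionTypeTree whose
            // getBounds() returns the two component trees, Runnable and Serializable.
            Runnable r = (Runnable & Serializable) () -> System.out.println("run");
            r.run();
        }
    }
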
--- a/langtools/src/share/classes/com/sun/source/tree/Tree.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/source/tree/Tree.java Mon Dec 17 08:30:06 2012 -0500
@@ -247,6 +247,11 @@
UNION_TYPE(UnionTypeTree.class),
/**
+ * Used for instances of {@link IntersectionTypeTree}.
+ */
+ INTERSECTION_TYPE(IntersectionTypeTree.class),
+
+ /**
* Used for instances of {@link TypeCastTree}.
*/
TYPE_CAST(TypeCastTree.class),
--- a/langtools/src/share/classes/com/sun/source/tree/TreeVisitor.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/source/tree/TreeVisitor.java Mon Dec 17 08:30:06 2012 -0500
@@ -98,6 +98,7 @@
R visitTry(TryTree node, P p);
R visitParameterizedType(ParameterizedTypeTree node, P p);
R visitUnionType(UnionTypeTree node, P p);
+ R visitIntersectionType(IntersectionTypeTree node, P p);
R visitArrayType(ArrayTypeTree node, P p);
R visitTypeCast(TypeCastTree node, P p);
R visitPrimitiveType(PrimitiveTypeTree node, P p);
--- a/langtools/src/share/classes/com/sun/source/util/SimpleTreeVisitor.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/source/util/SimpleTreeVisitor.java Mon Dec 17 08:30:06 2012 -0500
@@ -240,6 +240,10 @@
return defaultAction(node, p);
}
+ public R visitIntersectionType(IntersectionTypeTree node, P p) {
+ return defaultAction(node, p);
+ }
+
public R visitTypeParameter(TypeParameterTree node, P p) {
return defaultAction(node, p);
}
--- a/langtools/src/share/classes/com/sun/source/util/TreeScanner.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/source/util/TreeScanner.java Mon Dec 17 08:30:06 2012 -0500
@@ -371,6 +371,10 @@
return scan(node.getTypeAlternatives(), p);
}
+ public R visitIntersectionType(IntersectionTypeTree node, P p) {
+ return scan(node.getBounds(), p);
+ }
+
public R visitTypeParameter(TypeParameterTree node, P p) {
R r = scan(node.getBounds(), p);
return r;
--- a/langtools/src/share/classes/com/sun/tools/classfile/Instruction.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/classfile/Instruction.java Mon Dec 17 08:30:06 2012 -0500
@@ -71,11 +71,16 @@
SHORT(3),
/** Wide opcode is not followed by any operands. */
WIDE_NO_OPERANDS(2),
+ /** Wide opcode is followed by a 2-byte index into the local variables array. */
+ WIDE_LOCAL(4),
/** Wide opcode is followed by a 2-byte index into the constant pool. */
WIDE_CPREF_W(4),
/** Wide opcode is followed by a 2-byte index into the constant pool,
* and a signed short value. */
WIDE_CPREF_W_SHORT(6),
+ /** Wide opcode is followed by a 2-byte reference to a local variable,
+ * and a signed short value. */
+ WIDE_LOCAL_SHORT(6),
/** Opcode was not recognized. */
UNKNOWN(1);
@@ -101,7 +106,7 @@
R visitConstantPoolRef(Instruction instr, int index, P p);
/** See {@link Kind#CPREF_W_UBYTE}, {@link Kind#CPREF_W_UBYTE_ZERO}, {@link Kind#WIDE_CPREF_W_SHORT}. */
R visitConstantPoolRefAndValue(Instruction instr, int index, int value, P p);
- /** See {@link Kind#LOCAL}. */
+ /** See {@link Kind#LOCAL}, {@link Kind#WIDE_LOCAL}. */
R visitLocal(Instruction instr, int index, P p);
/** See {@link Kind#LOCAL_BYTE}. */
R visitLocalAndValue(Instruction instr, int index, int value, P p);
@@ -315,6 +320,9 @@
case WIDE_NO_OPERANDS:
return visitor.visitNoOperands(this, p);
+ case WIDE_LOCAL:
+ return visitor.visitLocal(this, getUnsignedShort(2), p);
+
case WIDE_CPREF_W:
return visitor.visitConstantPoolRef(this, getUnsignedShort(2), p);
@@ -322,6 +330,10 @@
return visitor.visitConstantPoolRefAndValue(
this, getUnsignedShort(2), getUnsignedByte(4), p);
+ case WIDE_LOCAL_SHORT:
+ return visitor.visitLocalAndValue(
+ this, getUnsignedShort(2), getShort(4), p);
+
case UNKNOWN:
return visitor.visitUnknown(this, p);
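
For reference, a minimal sketch of the operand layout the new WIDE_LOCAL and WIDE_LOCAL_SHORT kinds describe: the two bytes after the wide/opcode pair form an unsigned local-variable index (mirroring getUnsignedShort(2) above), and for wide iinc a signed 16-bit increment follows (getShort(4)):

    public class WideOpcodeSketch {
        public static void main(String[] args) {
            // wide iload 291  ->  bytes c4 15 01 23  (kind WIDE_LOCAL, length 4)
            int[] wideIload = { 0xc4, 0x15, 0x01, 0x23 };
            int localIndex = (wideIload[2] << 8) | wideIload[3];        // getUnsignedShort(2)
            System.out.println("iload local #" + localIndex);           // 291

            // wide iinc 291 by -2  ->  bytes c4 84 01 23 ff fe  (kind WIDE_LOCAL_SHORT, length 6)
            int[] wideIinc = { 0xc4, 0x84, 0x01, 0x23, 0xff, 0xfe };
            int index = (wideIinc[2] << 8) | wideIinc[3];
            short delta = (short) ((wideIinc[4] << 8) | wideIinc[5]);   // getShort(4), sign-extended
            System.out.println("iinc local #" + index + " by " + delta);
        }
    }
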
--- a/langtools/src/share/classes/com/sun/tools/classfile/Opcode.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/classfile/Opcode.java Mon Dec 17 08:30:06 2012 -0500
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -246,18 +246,18 @@
// impdep 0xff: Picojava priv
// wide opcodes
- ILOAD_W(0xc415, WIDE_CPREF_W),
- LLOAD_W(0xc416, WIDE_CPREF_W),
- FLOAD_W(0xc417, WIDE_CPREF_W),
- DLOAD_W(0xc418, WIDE_CPREF_W),
- ALOAD_W(0xc419, WIDE_CPREF_W),
- ISTORE_W(0xc436, WIDE_CPREF_W),
- LSTORE_W(0xc437, WIDE_CPREF_W),
- FSTORE_W(0xc438, WIDE_CPREF_W),
- DSTORE_W(0xc439, WIDE_CPREF_W),
- ASTORE_W(0xc43a, WIDE_CPREF_W),
- IINC_W(0xc484, WIDE_CPREF_W_SHORT),
- RET_W(0xc4a9, WIDE_CPREF_W),
+ ILOAD_W(0xc415, WIDE_LOCAL),
+ LLOAD_W(0xc416, WIDE_LOCAL),
+ FLOAD_W(0xc417, WIDE_LOCAL),
+ DLOAD_W(0xc418, WIDE_LOCAL),
+ ALOAD_W(0xc419, WIDE_LOCAL),
+ ISTORE_W(0xc436, WIDE_LOCAL),
+ LSTORE_W(0xc437, WIDE_LOCAL),
+ FSTORE_W(0xc438, WIDE_LOCAL),
+ DSTORE_W(0xc439, WIDE_LOCAL),
+ ASTORE_W(0xc43a, WIDE_LOCAL),
+ IINC_W(0xc484, WIDE_LOCAL_SHORT),
+ RET_W(0xc4a9, WIDE_LOCAL),
// PicoJava nonpriv instructions
LOAD_UBYTE(PICOJAVA, 0xfe00),
--- a/langtools/src/share/classes/com/sun/tools/javac/code/Source.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/code/Source.java Mon Dec 17 08:30:06 2012 -0500
@@ -215,6 +215,9 @@
public boolean allowRepeatedAnnotations() {
return compareTo(JDK1_8) >= 0;
}
+ public boolean allowIntersectionTypesInCast() {
+ return compareTo(JDK1_8) >= 0;
+ }
public static SourceVersion toSourceVersion(Source source) {
switch(source) {
case JDK1_2:
--- a/langtools/src/share/classes/com/sun/tools/javac/code/Type.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/code/Type.java Mon Dec 17 08:30:06 2012 -0500
@@ -839,6 +839,49 @@
}
}
+ // a clone of a ClassType that knows about the bounds of an intersection type.
+ public static class IntersectionClassType extends ClassType implements IntersectionType {
+
+ public boolean allInterfaces;
+
+ public enum IntersectionKind {
+ EXPLICIT,
+ IMPLICT;
+ }
+
+ public IntersectionKind intersectionKind;
+
+ public IntersectionClassType(List<Type> bounds, ClassSymbol csym, boolean allInterfaces) {
+ super(Type.noType, List.<Type>nil(), csym);
+ this.allInterfaces = allInterfaces;
+ Assert.check((csym.flags() & COMPOUND) != 0);
+ supertype_field = bounds.head;
+ interfaces_field = bounds.tail;
+ Assert.check(supertype_field.tsym.completer != null ||
+ !supertype_field.isInterface(), supertype_field);
+ }
+
+ public java.util.List<? extends TypeMirror> getBounds() {
+ return Collections.unmodifiableList(getComponents());
+ }
+
+ public List<Type> getComponents() {
+ return interfaces_field.prepend(supertype_field);
+ }
+
+ @Override
+ public TypeKind getKind() {
+ return TypeKind.INTERSECTION;
+ }
+
+ @Override
+ public <R, P> R accept(TypeVisitor<R, P> v, P p) {
+ return intersectionKind == IntersectionKind.EXPLICIT ?
+ v.visitIntersection(this, p) :
+ v.visitDeclared(this, p);
+ }
+ }
+
public static class ArrayType extends Type
implements javax.lang.model.type.ArrayType {
--- a/langtools/src/share/classes/com/sun/tools/javac/code/Types.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/code/Types.java Mon Dec 17 08:30:06 2012 -0500
@@ -26,7 +26,13 @@
package com.sun.tools.javac.code;
import java.lang.ref.SoftReference;
-import java.util.*;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.WeakHashMap;
import com.sun.tools.javac.code.Attribute.RetentionPolicy;
import com.sun.tools.javac.code.Lint.LintCategory;
@@ -383,28 +389,6 @@
}
/**
- * Scope filter used to skip methods that should be ignored during
- * function interface conversion (such as methods overridden by
- * j.l.Object)
- */
- class DescriptorFilter implements Filter<Symbol> {
-
- TypeSymbol origin;
-
- DescriptorFilter(TypeSymbol origin) {
- this.origin = origin;
- }
-
- @Override
- public boolean accepts(Symbol sym) {
- return sym.kind == Kinds.MTH &&
- (sym.flags() & (ABSTRACT | DEFAULT)) == ABSTRACT &&
- !overridesObjectMethod(origin, sym) &&
- (interfaceCandidates(origin.type, (MethodSymbol)sym).head.flags() & DEFAULT) == 0;
- }
- };
-
- /**
* Compute the function descriptor associated with a given functional interface
*/
public FunctionDescriptor findDescriptorInternal(TypeSymbol origin, CompoundScope membersCache) throws FunctionDescriptorLookupError {
@@ -431,23 +415,8 @@
throw failure("not.a.functional.intf.1",
diags.fragment("no.abstracts", Kinds.kindName(origin), origin));
} else if (abstracts.size() == 1) {
- if (abstracts.first().type.tag == FORALL) {
- throw failure("invalid.generic.desc.in.functional.intf",
- abstracts.first(),
- Kinds.kindName(origin),
- origin);
- } else {
- return new FunctionDescriptor(abstracts.first());
- }
+ return new FunctionDescriptor(abstracts.first());
} else { // size > 1
- for (Symbol msym : abstracts) {
- if (msym.type.tag == FORALL) {
- throw failure("invalid.generic.desc.in.functional.intf",
- abstracts.first(),
- Kinds.kindName(origin),
- origin);
- }
- }
FunctionDescriptor descRes = mergeDescriptors(origin, abstracts.toList());
if (descRes == null) {
//we can get here if the functional interface is ill-formed
@@ -586,6 +555,85 @@
}
// </editor-fold>
+ /**
+ * Scope filter used to skip methods that should be ignored (such as methods
+ * overridden by j.l.Object) during functional interface conversion/marker interface checks
+ */
+ class DescriptorFilter implements Filter<Symbol> {
+
+ TypeSymbol origin;
+
+ DescriptorFilter(TypeSymbol origin) {
+ this.origin = origin;
+ }
+
+ @Override
+ public boolean accepts(Symbol sym) {
+ return sym.kind == Kinds.MTH &&
+ (sym.flags() & (ABSTRACT | DEFAULT)) == ABSTRACT &&
+ !overridesObjectMethod(origin, sym) &&
+ (interfaceCandidates(origin.type, (MethodSymbol)sym).head.flags() & DEFAULT) == 0;
+ }
+ };
+
+ // <editor-fold defaultstate="collapsed" desc="isMarker">
+
+ /**
+ * A cache that keeps track of marker interfaces
+ */
+ class MarkerCache {
+
+ private WeakHashMap<TypeSymbol, Entry> _map = new WeakHashMap<TypeSymbol, Entry>();
+
+ class Entry {
+ final boolean isMarkerIntf;
+ final int prevMark;
+
+ public Entry(boolean isMarkerIntf,
+ int prevMark) {
+ this.isMarkerIntf = isMarkerIntf;
+ this.prevMark = prevMark;
+ }
+
+ boolean matches(int mark) {
+ return this.prevMark == mark;
+ }
+ }
+
+ boolean get(TypeSymbol origin) throws FunctionDescriptorLookupError {
+ Entry e = _map.get(origin);
+ CompoundScope members = membersClosure(origin.type, false);
+ if (e == null ||
+ !e.matches(members.getMark())) {
+ boolean isMarkerIntf = isMarkerInterfaceInternal(origin, members);
+ _map.put(origin, new Entry(isMarkerIntf, members.getMark()));
+ return isMarkerIntf;
+ }
+ else {
+ return e.isMarkerIntf;
+ }
+ }
+
+ /**
+         * Is the given symbol a marker interface?
+ */
+ public boolean isMarkerInterfaceInternal(TypeSymbol origin, CompoundScope membersCache) throws FunctionDescriptorLookupError {
+ return !origin.isInterface() ?
+ false :
+ !membersCache.getElements(new DescriptorFilter(origin)).iterator().hasNext();
+ }
+ }
+
+ private MarkerCache markerCache = new MarkerCache();
+
+ /**
+     * Is the given type a marker interface?
+ */
+ public boolean isMarkerInterface(Type site) {
+ return markerCache.get(site.tsym);
+ }
+ // </editor-fold>
+
// <editor-fold defaultstate="collapsed" desc="isSubtype">
/**
* Is t an unchecked subtype of s?
@@ -1964,45 +2012,28 @@
* @param supertype is objectType if all bounds are interfaces,
* null otherwise.
*/
- public Type makeCompoundType(List<Type> bounds,
- Type supertype) {
+ public Type makeCompoundType(List<Type> bounds) {
+ return makeCompoundType(bounds, bounds.head.tsym.isInterface());
+ }
+ public Type makeCompoundType(List<Type> bounds, boolean allInterfaces) {
+ Assert.check(bounds.nonEmpty());
+ Type firstExplicitBound = bounds.head;
+ if (allInterfaces) {
+ bounds = bounds.prepend(syms.objectType);
+ }
ClassSymbol bc =
new ClassSymbol(ABSTRACT|PUBLIC|SYNTHETIC|COMPOUND|ACYCLIC,
Type.moreInfo
? names.fromString(bounds.toString())
: names.empty,
+ null,
syms.noSymbol);
- if (bounds.head.tag == TYPEVAR)
- // error condition, recover
- bc.erasure_field = syms.objectType;
- else
- bc.erasure_field = erasure(bounds.head);
- bc.members_field = new Scope(bc);
- ClassType bt = (ClassType)bc.type;
- bt.allparams_field = List.nil();
- if (supertype != null) {
- bt.supertype_field = supertype;
- bt.interfaces_field = bounds;
- } else {
- bt.supertype_field = bounds.head;
- bt.interfaces_field = bounds.tail;
- }
- Assert.check(bt.supertype_field.tsym.completer != null
- || !bt.supertype_field.isInterface(),
- bt.supertype_field);
- return bt;
- }
-
- /**
- * Same as {@link #makeCompoundType(List,Type)}, except that the
- * second parameter is computed directly. Note that this might
- * cause a symbol completion. Hence, this version of
- * makeCompoundType may not be called during a classfile read.
- */
- public Type makeCompoundType(List<Type> bounds) {
- Type supertype = (bounds.head.tsym.flags() & INTERFACE) != 0 ?
- supertype(bounds.head) : null;
- return makeCompoundType(bounds, supertype);
+ bc.type = new IntersectionClassType(bounds, bc, allInterfaces);
+ bc.erasure_field = (bounds.head.tag == TYPEVAR) ?
+ syms.objectType : // error condition, recover
+ erasure(firstExplicitBound);
+ bc.members_field = new Scope(bc);
+ return bc.type;
}
/**
@@ -2192,12 +2223,8 @@
* @param supertype is objectType if all bounds are interfaces,
* null otherwise.
*/
- public void setBounds(TypeVar t, List<Type> bounds, Type supertype) {
- if (bounds.tail.isEmpty())
- t.bound = bounds.head;
- else
- t.bound = makeCompoundType(bounds, supertype);
- t.rank_field = -1;
+ public void setBounds(TypeVar t, List<Type> bounds) {
+ setBounds(t, bounds, bounds.head.tsym.isInterface());
}
/**
@@ -2209,10 +2236,10 @@
* Note that this check might cause a symbol completion. Hence, this version of
* setBounds may not be called during a classfile read.
*/
- public void setBounds(TypeVar t, List<Type> bounds) {
- Type supertype = (bounds.head.tsym.flags() & INTERFACE) != 0 ?
- syms.objectType : null;
- setBounds(t, bounds, supertype);
+ public void setBounds(TypeVar t, List<Type> bounds, boolean allInterfaces) {
+ t.bound = bounds.tail.isEmpty() ?
+ bounds.head :
+ makeCompoundType(bounds, allInterfaces);
t.rank_field = -1;
}
// </editor-fold>
@@ -2222,7 +2249,7 @@
* Return list of bounds of the given type variable.
*/
public List<Type> getBounds(TypeVar t) {
- if (t.bound.hasTag(NONE))
+ if (t.bound.hasTag(NONE))
return List.nil();
else if (t.bound.isErroneous() || !t.bound.isCompound())
return List.of(t.bound);
@@ -3321,8 +3348,7 @@
if (arraySuperType == null) {
// JLS 10.8: all arrays implement Cloneable and Serializable.
arraySuperType = makeCompoundType(List.of(syms.serializableType,
- syms.cloneableType),
- syms.objectType);
+ syms.cloneableType), true);
}
}
}
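
The MarkerCache above classifies an interface as a marker when the DescriptorFilter finds no candidate methods in it, that is, no abstract methods beyond those that override java.lang.Object members or resolve to default methods. A minimal sketch of the distinction, using made-up interface names rather than anything from this changeset:

    // Marker: declares no methods at all.
    interface Tagged { }

    // Still a marker: equals(Object) overrides a public java.lang.Object method,
    // so the DescriptorFilter skips it.
    interface EqualsOnly { boolean equals(Object o); }

    // Not a marker: run() is abstract and not declared in Object, so
    // Types.isMarkerInterface would report false for this type.
    interface Work { void run(); }
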
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Attr.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Attr.java Mon Dec 17 08:30:06 2012 -0500
@@ -716,21 +716,8 @@
}
a.tsym.flags_field &= ~UNATTRIBUTED;
}
- for (JCTypeParameter tvar : typarams)
+ for (JCTypeParameter tvar : typarams) {
chk.checkNonCyclic(tvar.pos(), (TypeVar)tvar.type);
- attribStats(typarams, env);
- }
-
- void attribBounds(List<JCTypeParameter> typarams) {
- for (JCTypeParameter typaram : typarams) {
- Type bound = typaram.type.getUpperBound();
- if (bound != null && bound.tsym instanceof ClassSymbol) {
- ClassSymbol c = (ClassSymbol)bound.tsym;
- if ((c.flags_field & COMPOUND) != 0) {
- Assert.check((c.flags_field & UNATTRIBUTED) != 0, c);
- attribClass(typaram.pos(), c);
- }
- }
}
}
@@ -892,7 +879,12 @@
deferredLintHandler.flush(tree.pos());
chk.checkDeprecatedAnnotation(tree.pos(), m);
- attribBounds(tree.typarams);
+ // Create a new environment with local scope
+ // for attributing the method.
+ Env<AttrContext> localEnv = memberEnter.methodEnv(tree, env);
+ localEnv.info.lint = lint;
+
+ attribStats(tree.typarams, localEnv);
// If we override any other methods, check that we do so properly.
// JLS ???
@@ -903,12 +895,6 @@
}
chk.checkOverride(tree, m);
- // Create a new environment with local scope
- // for attributing the method.
- Env<AttrContext> localEnv = memberEnter.methodEnv(tree, env);
-
- localEnv.info.lint = lint;
-
if (isDefaultMethod && types.overridesObjectMethod(m.enclClass(), m)) {
log.error(tree, "default.overrides.object.member", m.name, Kinds.kindName(m.location()), m.location());
}
@@ -2196,7 +2182,7 @@
Type target;
Type lambdaType;
if (pt() != Type.recoveryType) {
- target = infer.instantiateFunctionalInterface(that, pt(), explicitParamTypes, resultInfo.checkContext);
+ target = infer.instantiateFunctionalInterface(that, checkIntersectionTarget(that, resultInfo), explicitParamTypes, resultInfo.checkContext);
lambdaType = types.findDescriptorType(target);
chk.checkFunctionalInterface(that, target);
} else {
@@ -2204,6 +2190,14 @@
lambdaType = fallbackDescriptorType(that);
}
+ if (lambdaType.hasTag(FORALL)) {
+ //lambda expression target desc cannot be a generic method
+ resultInfo.checkContext.report(that, diags.fragment("invalid.generic.lambda.target",
+ lambdaType, kindName(target.tsym), target.tsym));
+ result = that.type = types.createErrorType(pt());
+ return;
+ }
+
if (!TreeInfo.isExplicitLambda(that)) {
//add param type info in the AST
List<Type> actuals = lambdaType.getParameterTypes();
@@ -2244,9 +2238,13 @@
//with the target-type, it will be recovered anyway in Attr.checkId
needsRecovery = false;
+ FunctionalReturnContext funcContext = that.getBodyKind() == JCLambda.BodyKind.EXPRESSION ?
+ new ExpressionLambdaReturnContext((JCExpression)that.getBody(), resultInfo.checkContext) :
+ new FunctionalReturnContext(resultInfo.checkContext);
+
ResultInfo bodyResultInfo = lambdaType.getReturnType() == Type.recoveryType ?
recoveryInfo :
- new ResultInfo(VAL, lambdaType.getReturnType(), new LambdaReturnContext(resultInfo.checkContext));
+ new ResultInfo(VAL, lambdaType.getReturnType(), funcContext);
localEnv.info.returnResult = bodyResultInfo;
if (that.getBodyKind() == JCLambda.BodyKind.EXPRESSION) {
@@ -2282,6 +2280,26 @@
}
}
}
+
+ private Type checkIntersectionTarget(DiagnosticPosition pos, ResultInfo resultInfo) {
+ Type pt = resultInfo.pt;
+ if (pt != Type.recoveryType && pt.isCompound()) {
+ IntersectionClassType ict = (IntersectionClassType)pt;
+ List<Type> bounds = ict.allInterfaces ?
+ ict.getComponents().tail :
+ ict.getComponents();
+ types.findDescriptorType(bounds.head); //propagate exception outwards!
+ for (Type bound : bounds.tail) {
+ if (!types.isMarkerInterface(bound)) {
+ resultInfo.checkContext.report(pos, diags.fragment("secondary.bound.must.be.marker.intf", bound));
+ }
+ }
+ //for now (translation doesn't support intersection types)
+ return bounds.head;
+ } else {
+ return pt;
+ }
+ }
//where
private Type fallbackDescriptorType(JCExpression tree) {
switch (tree.getTag()) {
@@ -2327,8 +2345,9 @@
* type according to both the inherited context and the assignment
* context.
*/
- class LambdaReturnContext extends Check.NestedCheckContext {
- public LambdaReturnContext(CheckContext enclosingContext) {
+ class FunctionalReturnContext extends Check.NestedCheckContext {
+
+ FunctionalReturnContext(CheckContext enclosingContext) {
super(enclosingContext);
}
@@ -2344,6 +2363,23 @@
}
}
+ class ExpressionLambdaReturnContext extends FunctionalReturnContext {
+
+ JCExpression expr;
+
+ ExpressionLambdaReturnContext(JCExpression expr, CheckContext enclosingContext) {
+ super(enclosingContext);
+ this.expr = expr;
+ }
+
+ @Override
+ public boolean compatible(Type found, Type req, Warner warn) {
+ //a void return is compatible with an expression statement lambda
+ return TreeInfo.isExpressionStatement(expr) && req.hasTag(VOID) ||
+ super.compatible(found, req, warn);
+ }
+ }
+
/**
* Lambda compatibility. Check that given return types, thrown types, parameter types
* are compatible with the expected functional interface descriptor. This means that:
@@ -2428,7 +2464,7 @@
}
//attrib type-arguments
- List<Type> typeargtypes = null;
+ List<Type> typeargtypes = List.nil();
if (that.typeargs != null) {
typeargtypes = attribTypes(that.typeargs, localEnv);
}
@@ -2436,7 +2472,7 @@
Type target;
Type desc;
if (pt() != Type.recoveryType) {
- target = infer.instantiateFunctionalInterface(that, pt(), null, resultInfo.checkContext);
+ target = infer.instantiateFunctionalInterface(that, checkIntersectionTarget(that, resultInfo), null, resultInfo.checkContext);
desc = types.findDescriptorType(target);
chk.checkFunctionalInterface(that, target);
} else {
@@ -2498,6 +2534,26 @@
}
}
+ if (resultInfo.checkContext.deferredAttrContext().mode == AttrMode.CHECK) {
+ if (refSym.isStatic() && TreeInfo.isStaticSelector(that.expr, names) &&
+ exprType.getTypeArguments().nonEmpty()) {
+ //static ref with class type-args
+ log.error(that.expr.pos(), "invalid.mref", Kinds.kindName(that.getMode()),
+ diags.fragment("static.mref.with.targs"));
+ result = that.type = types.createErrorType(target);
+ return;
+ }
+
+ if (refSym.isStatic() && !TreeInfo.isStaticSelector(that.expr, names) &&
+ !lookupHelper.referenceKind(refSym).isUnbound()) {
+ //no static bound mrefs
+ log.error(that.expr.pos(), "invalid.mref", Kinds.kindName(that.getMode()),
+ diags.fragment("static.bound.mref"));
+ result = that.type = types.createErrorType(target);
+ return;
+ }
+ }
+
if (desc.getReturnType() == Type.recoveryType) {
// stop here
result = that.type = target;
@@ -2560,7 +2616,7 @@
if (!returnType.hasTag(VOID) && !resType.hasTag(VOID)) {
if (resType.isErroneous() ||
- new LambdaReturnContext(checkContext).compatible(resType, returnType, types.noWarnings)) {
+ new FunctionalReturnContext(checkContext).compatible(resType, returnType, types.noWarnings)) {
incompatibleReturnType = null;
}
}
@@ -3525,63 +3581,79 @@
tree.type = result = t;
}
- public void visitTypeParameter(JCTypeParameter tree) {
- TypeVar a = (TypeVar)tree.type;
+ public void visitTypeIntersection(JCTypeIntersection tree) {
+ attribTypes(tree.bounds, env);
+ tree.type = result = checkIntersection(tree, tree.bounds);
+ }
+
+ public void visitTypeParameter(JCTypeParameter tree) {
+ TypeVar typeVar = (TypeVar)tree.type;
+ if (!typeVar.bound.isErroneous()) {
+ //fixup type-parameter bound computed in 'attribTypeVariables'
+ typeVar.bound = checkIntersection(tree, tree.bounds);
+ }
+ }
+
+ Type checkIntersection(JCTree tree, List<JCExpression> bounds) {
Set<Type> boundSet = new HashSet<Type>();
- if (a.bound.isErroneous())
- return;
- List<Type> bs = types.getBounds(a);
- if (tree.bounds.nonEmpty()) {
+ if (bounds.nonEmpty()) {
// accept class or interface or typevar as first bound.
- Type b = checkBase(bs.head, tree.bounds.head, env, false, false, false);
- boundSet.add(types.erasure(b));
- if (b.isErroneous()) {
- a.bound = b;
+ bounds.head.type = checkBase(bounds.head.type, bounds.head, env, false, false, false);
+ boundSet.add(types.erasure(bounds.head.type));
+ if (bounds.head.type.isErroneous()) {
+ return bounds.head.type;
}
- else if (b.hasTag(TYPEVAR)) {
+ else if (bounds.head.type.hasTag(TYPEVAR)) {
// if first bound was a typevar, do not accept further bounds.
- if (tree.bounds.tail.nonEmpty()) {
- log.error(tree.bounds.tail.head.pos(),
+ if (bounds.tail.nonEmpty()) {
+ log.error(bounds.tail.head.pos(),
"type.var.may.not.be.followed.by.other.bounds");
- tree.bounds = List.of(tree.bounds.head);
- a.bound = bs.head;
+ return bounds.head.type;
}
} else {
// if first bound was a class or interface, accept only interfaces
// as further bounds.
- for (JCExpression bound : tree.bounds.tail) {
- bs = bs.tail;
- Type i = checkBase(bs.head, bound, env, false, true, false);
- if (i.isErroneous())
- a.bound = i;
- else if (i.hasTag(CLASS))
- chk.checkNotRepeated(bound.pos(), types.erasure(i), boundSet);
+ for (JCExpression bound : bounds.tail) {
+ bound.type = checkBase(bound.type, bound, env, false, true, false);
+ if (bound.type.isErroneous()) {
+ bounds = List.of(bound);
+ }
+ else if (bound.type.hasTag(CLASS)) {
+ chk.checkNotRepeated(bound.pos(), types.erasure(bound.type), boundSet);
+ }
}
}
}
- bs = types.getBounds(a);
-
- // in case of multiple bounds ...
- if (bs.length() > 1) {
+
+ if (bounds.length() == 0) {
+ return syms.objectType;
+ } else if (bounds.length() == 1) {
+ return bounds.head.type;
+ } else {
+ Type owntype = types.makeCompoundType(TreeInfo.types(bounds));
+ if (tree.hasTag(TYPEINTERSECTION)) {
+ ((IntersectionClassType)owntype).intersectionKind =
+ IntersectionClassType.IntersectionKind.EXPLICIT;
+ }
// ... the variable's bound is a class type flagged COMPOUND
// (see comment for TypeVar.bound).
// In this case, generate a class tree that represents the
// bound class, ...
JCExpression extending;
List<JCExpression> implementing;
- if ((bs.head.tsym.flags() & INTERFACE) == 0) {
- extending = tree.bounds.head;
- implementing = tree.bounds.tail;
+ if (!bounds.head.type.isInterface()) {
+ extending = bounds.head;
+ implementing = bounds.tail;
} else {
extending = null;
- implementing = tree.bounds;
+ implementing = bounds;
}
- JCClassDecl cd = make.at(tree.pos).ClassDef(
+ JCClassDecl cd = make.at(tree).ClassDef(
make.Modifiers(PUBLIC | ABSTRACT),
- tree.name, List.<JCTypeParameter>nil(),
+ names.empty, List.<JCTypeParameter>nil(),
extending, implementing, List.<JCTree>nil());
- ClassSymbol c = (ClassSymbol)a.getUpperBound().tsym;
+ ClassSymbol c = (ClassSymbol)owntype.tsym;
Assert.check((c.flags() & COMPOUND) != 0);
cd.sym = c;
c.sourcefile = env.toplevel.sourcefile;
@@ -3590,10 +3662,11 @@
c.flags_field |= UNATTRIBUTED;
Env<AttrContext> cenv = enter.classEnv(cd, env);
enter.typeEnvs.put(c, cenv);
+ attribClass(c);
+ return owntype;
}
}
-
public void visitWildcard(JCWildcard tree) {
//- System.err.println("visitWildcard("+tree+");");//DEBUG
Type type = (tree.kind.kind == BoundKind.UNBOUND)
@@ -3747,7 +3820,7 @@
chk.validateAnnotations(tree.mods.annotations, c);
// Validate type parameters, supertype and interfaces.
- attribBounds(tree.typarams);
+ attribStats(tree.typarams, env);
if (!c.isAnonymous()) {
//already checked if anonymous
chk.validate(tree.typarams, env);
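
checkIntersectionTarget above accepts an intersection target only when its first bound supplies the functional descriptor and every further bound is a marker interface; the lambda is then typed against the first bound alone (translation of true intersection targets is still unsupported, see the Assert.check calls added to TransTypes below). A source-level sketch, assuming the hidden allowIntersectionTypes parser option is enabled; the IntersectionTargets class is made up:

    import java.io.Serializable;

    class IntersectionTargets {
        // ok: Serializable declares no methods, so it is a marker interface
        Runnable r1 = (Runnable & Serializable) () -> { };

        // rejected with secondary.bound.must.be.marker.intf: Comparable has an
        // abstract compareTo method
        // Runnable r2 = (Runnable & Comparable<String>) () -> { };
    }
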
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java Mon Dec 17 08:30:06 2012 -0500
@@ -288,21 +288,20 @@
JCExpression init;
switch(tree.kind) {
- case IMPLICIT_INNER: /** Inner # new */
- case SUPER: /** super # instMethod */
+ case IMPLICIT_INNER: /** Inner :: new */
+ case SUPER: /** super :: instMethod */
init = makeThis(
localContext.owner.owner.asType(),
localContext.owner);
break;
- case BOUND: /** Expr # instMethod */
+ case BOUND: /** Expr :: instMethod */
init = tree.getQualifierExpression();
break;
- case STATIC_EVAL: /** Expr # staticMethod */
- case UNBOUND: /** Type # instMethod */
- case STATIC: /** Type # staticMethod */
- case TOPLEVEL: /** Top level # new */
+ case UNBOUND: /** Type :: instMethod */
+ case STATIC: /** Type :: staticMethod */
+ case TOPLEVEL: /** Top level :: new */
init = null;
break;
@@ -315,14 +314,6 @@
//build a sam instance using an indy call to the meta-factory
result = makeMetaFactoryIndyCall(tree, tree.targetType, localContext.referenceKind(), refSym, indy_args);
-
- //if we had a static reference with non-static qualifier, add a let
- //expression to force the evaluation of the qualifier expr
- if (tree.hasKind(ReferenceKind.STATIC_EVAL)) {
- VarSymbol rec = new VarSymbol(0, names.fromString("rec$"), tree.getQualifierExpression().type, localContext.owner);
- JCVariableDecl recDef = make.VarDef(rec, tree.getQualifierExpression());
- result = make.LetExpr(recDef, result).setType(tree.type);
- }
}
/**
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Lower.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Lower.java Mon Dec 17 08:30:06 2012 -0500
@@ -138,6 +138,10 @@
*/
Map<ClassSymbol, JCClassDecl> classdefs;
+ /** A hash table mapping local classes to a list of pruned trees.
+ */
+ public Map<ClassSymbol, List<JCTree>> prunedTree = new WeakHashMap<ClassSymbol, List<JCTree>>();
+
/** A hash table mapping virtual accessed symbols in outer subclasses
* to the actually referred symbol in superclasses.
*/
@@ -1039,6 +1043,12 @@
}
}
+ private void addPrunedInfo(JCTree tree) {
+ List<JCTree> infoList = prunedTree.get(currentClass);
+ infoList = (infoList == null) ? List.of(tree) : infoList.prepend(tree);
+ prunedTree.put(currentClass, infoList);
+ }
+
/** Ensure that identifier is accessible, return tree accessing the identifier.
* @param sym The accessed symbol.
* @param tree The tree referring to the symbol.
@@ -1111,7 +1121,10 @@
// Constants are replaced by their constant value.
if (sym.kind == VAR) {
Object cv = ((VarSymbol)sym).getConstValue();
- if (cv != null) return makeLit(sym.type, cv);
+ if (cv != null) {
+ addPrunedInfo(tree);
+ return makeLit(sym.type, cv);
+ }
}
// Private variables and methods are replaced by calls
@@ -2746,12 +2759,15 @@
/** Visitor method for conditional expressions.
*/
+ @Override
public void visitConditional(JCConditional tree) {
JCTree cond = tree.cond = translate(tree.cond, syms.booleanType);
if (cond.type.isTrue()) {
result = convert(translate(tree.truepart, tree.type), tree.type);
+ addPrunedInfo(cond);
} else if (cond.type.isFalse()) {
result = convert(translate(tree.falsepart, tree.type), tree.type);
+ addPrunedInfo(cond);
} else {
// Condition is not a compile-time constant.
tree.truepart = translate(tree.truepart, tree.type);
@@ -2760,14 +2776,14 @@
}
}
//where
- private JCTree convert(JCTree tree, Type pt) {
- if (tree.type == pt || tree.type.hasTag(BOT))
- return tree;
- JCTree result = make_at(tree.pos()).TypeCast(make.Type(pt), (JCExpression)tree);
- result.type = (tree.type.constValue() != null) ? cfolder.coerce(tree.type, pt)
- : pt;
- return result;
- }
+ private JCTree convert(JCTree tree, Type pt) {
+ if (tree.type == pt || tree.type.hasTag(BOT))
+ return tree;
+ JCTree result = make_at(tree.pos()).TypeCast(make.Type(pt), (JCExpression)tree);
+ result.type = (tree.type.constValue() != null) ? cfolder.coerce(tree.type, pt)
+ : pt;
+ return result;
+ }
/** Visitor method for if statements.
*/
@@ -2775,12 +2791,14 @@
JCTree cond = tree.cond = translate(tree.cond, syms.booleanType);
if (cond.type.isTrue()) {
result = translate(tree.thenpart);
+ addPrunedInfo(cond);
} else if (cond.type.isFalse()) {
if (tree.elsepart != null) {
result = translate(tree.elsepart);
} else {
result = make.Skip();
}
+ addPrunedInfo(cond);
} else {
// Condition is not a compile-time constant.
tree.thenpart = translate(tree.thenpart);
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/Resolve.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/Resolve.java Mon Dec 17 08:30:06 2012 -0500
@@ -2617,8 +2617,7 @@
@Override
ReferenceKind referenceKind(Symbol sym) {
if (sym.isStatic()) {
- return TreeInfo.isStaticSelector(referenceTree.expr, names) ?
- ReferenceKind.STATIC : ReferenceKind.STATIC_EVAL;
+ return ReferenceKind.STATIC;
} else {
Name selName = TreeInfo.name(referenceTree.getQualifierExpression());
return selName != null && selName == names._super ?
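
With ReferenceKind.STATIC_EVAL removed, a static method reference is only legal through a plain type name; Attr now rejects the other shapes with the new static.bound.mref and static.mref.with.targs fragments. A sketch of the three cases (Box and StaticRefs are made-up names, not part of the changeset's tests):

    import java.util.function.Supplier;

    class Box<T> {
        static Box<String> make() { return new Box<String>(); }
    }

    class StaticRefs {
        Supplier<Box<String>> ok = Box::make;              // static method via type name: allowed

        // Supplier<Box<String>> bad1 = Box<String>::make; // error: parameterized qualifier on
        //                                                 //        static method reference
        // Box<String> b = new Box<String>();
        // Supplier<Box<String>> bad2 = b::make;           // error: static bound method reference
    }
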
--- a/langtools/src/share/classes/com/sun/tools/javac/comp/TransTypes.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/comp/TransTypes.java Mon Dec 17 08:30:06 2012 -0500
@@ -551,6 +551,7 @@
tree.body = translate(tree.body, null);
//save non-erased target
tree.targetType = tree.type;
+ Assert.check(!tree.targetType.isCompound(), "Intersection-type targets not supported yet!");
tree.type = erasure(tree.type);
result = tree;
}
@@ -786,6 +787,7 @@
tree.expr = translate(tree.expr, null);
//save non-erased target
tree.targetType = tree.type;
+ Assert.check(!tree.targetType.isCompound(), "Intersection-type targets not supported yet!");
tree.type = erasure(tree.type);
result = tree;
}
@@ -803,6 +805,12 @@
result = clazz;
}
+ public void visitTypeIntersection(JCTypeIntersection tree) {
+ tree.bounds = translate(tree.bounds, null);
+ tree.type = erasure(tree.type);
+ result = tree;
+ }
+
/**************************************************************************
* utility methods
*************************************************************************/
--- a/langtools/src/share/classes/com/sun/tools/javac/jvm/ClassReader.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/jvm/ClassReader.java Mon Dec 17 08:30:06 2012 -0500
@@ -846,17 +846,17 @@
tvar = (TypeVar)findTypeVar(name);
}
List<Type> bounds = List.nil();
- Type st = null;
+ boolean allInterfaces = false;
if (signature[sigp] == ':' && signature[sigp+1] == ':') {
sigp++;
- st = syms.objectType;
+ allInterfaces = true;
}
while (signature[sigp] == ':') {
sigp++;
bounds = bounds.prepend(sigToType());
}
if (!sigEnterPhase) {
- types.setBounds(tvar, bounds.reverse(), st);
+ types.setBounds(tvar, bounds.reverse(), allInterfaces);
}
return tvar;
}
--- a/langtools/src/share/classes/com/sun/tools/javac/jvm/Gen.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/jvm/Gen.java Mon Dec 17 08:30:06 2012 -0500
@@ -71,6 +71,7 @@
private final Map<Type,Symbol> stringBufferAppend;
private Name accessDollar;
private final Types types;
+ private final Lower lower;
/** Switch: GJ mode?
*/
@@ -112,6 +113,7 @@
stringBufferAppend = new HashMap<Type,Symbol>();
accessDollar = names.
fromString("access" + target.syntheticNameChar());
+ lower = Lower.instance(context);
Options options = Options.instance(context);
lineDebugInfo =
@@ -816,6 +818,62 @@
}
}
+ /** Visitor class for expressions which might be constant expressions.
+ * This class is a subset of TreeScanner. Intended to visit trees pruned by
+ * Lower as long as constant expressions looking for references to any
+ * ClassSymbol. Any such reference will be added to the constant pool so
+ * automated tools can detect class dependencies better.
+ */
+ class ClassReferenceVisitor extends JCTree.Visitor {
+
+ @Override
+ public void visitTree(JCTree tree) {}
+
+ @Override
+ public void visitBinary(JCBinary tree) {
+ tree.lhs.accept(this);
+ tree.rhs.accept(this);
+ }
+
+ @Override
+ public void visitSelect(JCFieldAccess tree) {
+ if (tree.selected.type.hasTag(CLASS)) {
+ makeRef(tree.selected.pos(), tree.selected.type);
+ }
+ }
+
+ @Override
+ public void visitIdent(JCIdent tree) {
+ if (tree.sym.owner instanceof ClassSymbol) {
+ pool.put(tree.sym.owner);
+ }
+ }
+
+ @Override
+ public void visitConditional(JCConditional tree) {
+ tree.cond.accept(this);
+ tree.truepart.accept(this);
+ tree.falsepart.accept(this);
+ }
+
+ @Override
+ public void visitUnary(JCUnary tree) {
+ tree.arg.accept(this);
+ }
+
+ @Override
+ public void visitParens(JCParens tree) {
+ tree.expr.accept(this);
+ }
+
+ @Override
+ public void visitTypeCast(JCTypeCast tree) {
+ tree.expr.accept(this);
+ }
+ }
+
+ private ClassReferenceVisitor classReferenceVisitor = new ClassReferenceVisitor();
+
/** Visitor method: generate code for an expression, catching and reporting
* any completion failures.
* @param tree The expression to be visited.
@@ -826,6 +884,7 @@
try {
if (tree.type.constValue() != null) {
// Short circuit any expressions which are constants
+ tree.accept(classReferenceVisitor);
checkStringConstant(tree.pos(), tree.type.constValue());
result = items.makeImmediateItem(tree.type, tree.type.constValue());
} else {
@@ -2205,6 +2264,15 @@
code.endScopes(limit);
}
+ private void generateReferencesToPrunedTree(ClassSymbol classSymbol, Pool pool) {
+ List<JCTree> prunedInfo = lower.prunedTree.get(classSymbol);
+ if (prunedInfo != null) {
+ for (JCTree prunedTree: prunedInfo) {
+ prunedTree.accept(classReferenceVisitor);
+ }
+ }
+ }
+
/* ************************************************************************
* main method
*************************************************************************/
@@ -2232,6 +2300,7 @@
cdef.defs = normalizeDefs(cdef.defs, c);
c.pool = pool;
pool.reset();
+ generateReferencesToPrunedTree(c, pool);
Env<GenContext> localEnv =
new Env<GenContext>(cdef, new GenContext());
localEnv.toplevel = env.toplevel;
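
Lower now records constant-folded identifiers and pruned if/conditional branches in prunedTree, and generateReferencesToPrunedTree feeds them to the ClassReferenceVisitor so classes mentioned only in folded code still reach the constant pool. In source terms (Config and Client are made-up names; the changeset's own test is CPoolRefClassContainingInlinedCts below):

    class Config {
        static final boolean VERBOSE = false;   // compile-time constant, inlined at use sites
    }

    class Client {
        void log(String msg) {
            if (Config.VERBOSE) {               // condition folds to false and the branch is pruned,
                System.err.println(msg);        // yet Client's constant pool keeps a CONSTANT_Class
            }                                   // entry for Config, so dependency tools see the edge
        }
    }
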
--- a/langtools/src/share/classes/com/sun/tools/javac/model/JavacTypes.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/model/JavacTypes.java Mon Dec 17 08:30:06 2012 -0500
@@ -74,6 +74,7 @@
public Element asElement(TypeMirror t) {
switch (t.getKind()) {
case DECLARED:
+ case INTERSECTION:
case ERROR:
case TYPEVAR:
Type type = cast(Type.class, t);
--- a/langtools/src/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/parser/JavaTokenizer.java Mon Dec 17 08:30:06 2012 -0500
@@ -348,8 +348,8 @@
private void scanIdent() {
boolean isJavaIdentifierPart;
char high;
+ reader.putChar(true);
do {
- reader.putChar(true);
switch (reader.ch) {
case 'A': case 'B': case 'C': case 'D': case 'E':
case 'F': case 'G': case 'H': case 'I': case 'J':
@@ -366,6 +366,7 @@
case '$': case '_':
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
+ break;
case '\u0000': case '\u0001': case '\u0002': case '\u0003':
case '\u0004': case '\u0005': case '\u0006': case '\u0007':
case '\u0008': case '\u000E': case '\u000F': case '\u0010':
@@ -373,26 +374,33 @@
case '\u0015': case '\u0016': case '\u0017':
case '\u0018': case '\u0019': case '\u001B':
case '\u007F':
- break;
+ reader.scanChar();
+ continue;
case '\u001A': // EOI is also a legal identifier part
if (reader.bp >= reader.buflen) {
name = reader.name();
tk = tokens.lookupKind(name);
return;
}
- break;
+ reader.scanChar();
+ continue;
default:
if (reader.ch < '\u0080') {
// all ASCII range chars already handled, above
isJavaIdentifierPart = false;
} else {
- high = reader.scanSurrogates();
- if (high != 0) {
- reader.putChar(high);
- isJavaIdentifierPart = Character.isJavaIdentifierPart(
- Character.toCodePoint(high, reader.ch));
+ if (Character.isIdentifierIgnorable(reader.ch)) {
+ reader.scanChar();
+ continue;
} else {
- isJavaIdentifierPart = Character.isJavaIdentifierPart(reader.ch);
+ high = reader.scanSurrogates();
+ if (high != 0) {
+ reader.putChar(high);
+ isJavaIdentifierPart = Character.isJavaIdentifierPart(
+ Character.toCodePoint(high, reader.ch));
+ } else {
+ isJavaIdentifierPart = Character.isJavaIdentifierPart(reader.ch);
+ }
}
}
if (!isJavaIdentifierPart) {
@@ -401,6 +409,7 @@
return;
}
}
+ reader.putChar(true);
} while (true);
}
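
scanIdent now skips characters for which Character.isIdentifierIgnorable returns true instead of appending them to the identifier's name, so spellings that differ only in such characters denote the same identifier. A small sketch (the Ignorable class is made up; the \u0000 below is a unicode escape in the source, exactly as in the IgnoreIgnorableCharactersInInput test added further down):

    class Ignorable {
        int AA\u0000BB = 1;   // the NUL is identifier-ignorable, so the field is simply named AABB
        int copy = AABB;      // resolves to the field declared above
    }
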
--- a/langtools/src/share/classes/com/sun/tools/javac/parser/JavacParser.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/parser/JavacParser.java Mon Dec 17 08:30:06 2012 -0500
@@ -124,6 +124,9 @@
this.allowLambda = source.allowLambda();
this.allowMethodReferences = source.allowMethodReferences();
this.allowDefaultMethods = source.allowDefaultMethods();
+ this.allowIntersectionTypesInCast =
+ source.allowIntersectionTypesInCast() &&
+ fac.options.isSet("allowIntersectionTypes");
this.keepDocComments = keepDocComments;
docComments = newDocCommentTable(keepDocComments, fac);
this.keepLineMap = keepLineMap;
@@ -197,6 +200,10 @@
*/
boolean allowDefaultMethods;
+ /** Switch: should we allow intersection types in cast?
+ */
+ boolean allowIntersectionTypesInCast;
+
/** Switch: should we keep docComments?
*/
boolean keepDocComments;
@@ -239,22 +246,38 @@
}
protected boolean peekToken(TokenKind tk) {
- return S.token(1).kind == tk;
+ return peekToken(0, tk);
+ }
+
+ protected boolean peekToken(int lookahead, TokenKind tk) {
+ return S.token(lookahead + 1).kind == tk;
}
protected boolean peekToken(TokenKind tk1, TokenKind tk2) {
- return S.token(1).kind == tk1 &&
- S.token(2).kind == tk2;
+ return peekToken(0, tk1, tk2);
+ }
+
+ protected boolean peekToken(int lookahead, TokenKind tk1, TokenKind tk2) {
+ return S.token(lookahead + 1).kind == tk1 &&
+ S.token(lookahead + 2).kind == tk2;
}
protected boolean peekToken(TokenKind tk1, TokenKind tk2, TokenKind tk3) {
- return S.token(1).kind == tk1 &&
- S.token(2).kind == tk2 &&
- S.token(3).kind == tk3;
+ return peekToken(0, tk1, tk2, tk3);
+ }
+
+ protected boolean peekToken(int lookahead, TokenKind tk1, TokenKind tk2, TokenKind tk3) {
+ return S.token(lookahead + 1).kind == tk1 &&
+ S.token(lookahead + 2).kind == tk2 &&
+ S.token(lookahead + 3).kind == tk3;
}
protected boolean peekToken(TokenKind... kinds) {
- for (int lookahead = 0 ; lookahead < kinds.length ; lookahead++) {
+ return peekToken(0, kinds);
+ }
+
+ protected boolean peekToken(int lookahead, TokenKind... kinds) {
+ for (; lookahead < kinds.length ; lookahead++) {
if (S.token(lookahead + 1).kind != kinds[lookahead]) {
return false;
}
@@ -966,102 +989,40 @@
break;
case LPAREN:
if (typeArgs == null && (mode & EXPR) != 0) {
- if (peekToken(MONKEYS_AT) ||
- peekToken(FINAL) ||
- peekToken(RPAREN) ||
- peekToken(IDENTIFIER, COMMA) ||
- peekToken(IDENTIFIER, RPAREN, ARROW)) {
- //implicit n-ary lambda
- t = lambdaExpressionOrStatement(true, peekToken(MONKEYS_AT) || peekToken(FINAL), pos);
- break;
- } else {
- nextToken();
- mode = EXPR | TYPE | NOPARAMS;
- t = term3();
- if ((mode & TYPE) != 0 && token.kind == LT) {
- // Could be a cast to a parameterized type
- JCTree.Tag op = JCTree.Tag.LT;
- int pos1 = token.pos;
- nextToken();
- mode &= (EXPR | TYPE);
- mode |= TYPEARG;
- JCExpression t1 = term3();
- if ((mode & TYPE) != 0 &&
- (token.kind == COMMA || token.kind == GT)) {
- mode = TYPE;
- ListBuffer<JCExpression> args = new ListBuffer<JCExpression>();
- args.append(t1);
- while (token.kind == COMMA) {
- nextToken();
- args.append(typeArgument());
- }
- accept(GT);
- t = toP(F.at(pos1).TypeApply(t, args.toList()));
- checkGenerics();
- mode = EXPR | TYPE; //could be a lambda or a method ref or a cast to a type
- t = term3Rest(t, typeArgs);
- if (token.kind == IDENTIFIER || token.kind == ELLIPSIS) {
- //explicit lambda (w/ generic type)
- mode = EXPR;
- JCModifiers mods = F.at(token.pos).Modifiers(Flags.PARAMETER);
- if (token.kind == ELLIPSIS) {
- mods.flags = Flags.VARARGS;
- t = to(F.at(token.pos).TypeArray(t));
- nextToken();
- }
- t = lambdaExpressionOrStatement(variableDeclaratorId(mods, t), pos);
- break;
- }
- } else if ((mode & EXPR) != 0) {
- mode = EXPR;
- JCExpression e = term2Rest(t1, TreeInfo.shiftPrec);
- t = F.at(pos1).Binary(op, t, e);
- t = termRest(term1Rest(term2Rest(t, TreeInfo.orPrec)));
- } else {
- accept(GT);
- }
- } else if ((mode & TYPE) != 0 &&
- (token.kind == IDENTIFIER || token.kind == ELLIPSIS)) {
- //explicit lambda (w/ non-generic type)
+ ParensResult pres = analyzeParens();
+ switch (pres) {
+ case CAST:
+ accept(LPAREN);
+ mode = TYPE;
+ int pos1 = pos;
+ List<JCExpression> targets = List.of(t = term3());
+ while (token.kind == AMP) {
+ checkIntersectionTypesInCast();
+ accept(AMP);
+ targets = targets.prepend(term3());
+ }
+ if (targets.length() > 1) {
+ t = toP(F.at(pos1).TypeIntersection(targets.reverse()));
+ }
+ accept(RPAREN);
+ mode = EXPR;
+ JCExpression t1 = term3();
+ return F.at(pos).TypeCast(t, t1);
+ case IMPLICIT_LAMBDA:
+ case EXPLICIT_LAMBDA:
+ t = lambdaExpressionOrStatement(true, pres == ParensResult.EXPLICIT_LAMBDA, pos);
+ break;
+ default: //PARENS
+ accept(LPAREN);
mode = EXPR;
- JCModifiers mods = F.at(token.pos).Modifiers(Flags.PARAMETER);
- if (token.kind == ELLIPSIS) {
- mods.flags = Flags.VARARGS;
- t = to(F.at(token.pos).TypeArray(t));
- nextToken();
- }
- t = lambdaExpressionOrStatement(variableDeclaratorId(mods, t), pos);
+ t = termRest(term1Rest(term2Rest(term3(), TreeInfo.orPrec)));
+ accept(RPAREN);
+ t = toP(F.at(pos).Parens(t));
break;
- } else {
- t = termRest(term1Rest(term2Rest(t, TreeInfo.orPrec)));
- }
- }
-
- accept(RPAREN);
- lastmode = mode;
- mode = EXPR;
- if ((lastmode & EXPR) == 0) {
- JCExpression t1 = term3();
- return F.at(pos).TypeCast(t, t1);
- } else if ((lastmode & TYPE) != 0) {
- switch (token.kind) {
- /*case PLUSPLUS: case SUBSUB: */
- case BANG: case TILDE:
- case LPAREN: case THIS: case SUPER:
- case INTLITERAL: case LONGLITERAL: case FLOATLITERAL:
- case DOUBLELITERAL: case CHARLITERAL: case STRINGLITERAL:
- case TRUE: case FALSE: case NULL:
- case NEW: case IDENTIFIER: case ASSERT: case ENUM:
- case BYTE: case SHORT: case CHAR: case INT:
- case LONG: case FLOAT: case DOUBLE: case BOOLEAN: case VOID:
- JCExpression t1 = term3();
- return F.at(pos).TypeCast(t, t1);
- }
}
} else {
return illegal();
}
- t = toP(F.at(pos).Parens(t));
break;
case THIS:
if ((mode & EXPR) != 0) {
@@ -1346,6 +1307,138 @@
}
}
+ /**
+ * Scan the tokens following a '(' to disambiguate between a cast,
+ * a parenthesized expression and an (implicit or explicit) lambda
+ * expression before committing to a parse.
+ */
+ @SuppressWarnings("fallthrough")
+ ParensResult analyzeParens() {
+ int depth = 0;
+ boolean type = false;
+ for (int lookahead = 0 ; ; lookahead++) {
+ TokenKind tk = S.token(lookahead).kind;
+ switch (tk) {
+ case EXTENDS: case SUPER: case COMMA:
+ type = true;
+ case QUES: case DOT: case AMP:
+ //skip
+ break;
+ case BYTE: case SHORT: case INT: case LONG: case FLOAT:
+ case DOUBLE: case BOOLEAN: case CHAR:
+ if (peekToken(lookahead, RPAREN)) {
+ //Type, ')' -> cast
+ return ParensResult.CAST;
+ } else if (peekToken(lookahead, IDENTIFIER)) {
+ //Type, Identifier -> explicit lambda
+ return ParensResult.EXPLICIT_LAMBDA;
+ }
+ break;
+ case LPAREN:
+ if (lookahead != 0) {
+ // '(' in a non-starting position -> parens
+ return ParensResult.PARENS;
+ } else if (peekToken(lookahead, RPAREN)) {
+ // '(', ')' -> explicit lambda
+ return ParensResult.EXPLICIT_LAMBDA;
+ }
+ break;
+ case RPAREN:
+ // if we have seen something that looks like a type,
+ // then it's a cast expression
+ if (type) return ParensResult.CAST;
+ // otherwise, disambiguate cast vs. parenthesized expression
+ // based on subsequent token.
+ switch (S.token(lookahead + 1).kind) {
+ /*case PLUSPLUS: case SUBSUB: */
+ case BANG: case TILDE:
+ case LPAREN: case THIS: case SUPER:
+ case INTLITERAL: case LONGLITERAL: case FLOATLITERAL:
+ case DOUBLELITERAL: case CHARLITERAL: case STRINGLITERAL:
+ case TRUE: case FALSE: case NULL:
+ case NEW: case IDENTIFIER: case ASSERT: case ENUM:
+ case BYTE: case SHORT: case CHAR: case INT:
+ case LONG: case FLOAT: case DOUBLE: case BOOLEAN: case VOID:
+ return ParensResult.CAST;
+ default:
+ return ParensResult.PARENS;
+ }
+ case IDENTIFIER:
+ if (peekToken(lookahead, IDENTIFIER)) {
+ // Identifier, Identifier -> explicit lambda
+ return ParensResult.EXPLICIT_LAMBDA;
+ } else if (peekToken(lookahead, RPAREN, ARROW)) {
+ // Identifier, ')' '->' -> implicit lambda
+ return ParensResult.IMPLICIT_LAMBDA;
+ }
+ break;
+ case FINAL:
+ case ELLIPSIS:
+ case MONKEYS_AT:
+ //those can only appear in explicit lambdas
+ return ParensResult.EXPLICIT_LAMBDA;
+ case LBRACKET:
+ if (peekToken(lookahead, RBRACKET, IDENTIFIER)) {
+ // '[', ']', Identifier -> explicit lambda
+ return ParensResult.EXPLICIT_LAMBDA;
+ } else if (peekToken(lookahead, RBRACKET, RPAREN) ||
+ peekToken(lookahead, RBRACKET, AMP)) {
+ // '[', ']', ')' -> cast
+ // '[', ']', '&' -> cast (intersection type)
+ return ParensResult.CAST;
+ } else if (peekToken(lookahead, RBRACKET)) {
+ //consume the ']' and skip
+ type = true;
+ lookahead++;
+ break;
+ } else {
+ return ParensResult.PARENS;
+ }
+ case LT:
+ depth++; break;
+ case GTGTGT:
+ depth--;
+ case GTGT:
+ depth--;
+ case GT:
+ depth--;
+ if (depth == 0) {
+ if (peekToken(lookahead, RPAREN) ||
+ peekToken(lookahead, AMP)) {
+ // '>', ')' -> cast
+ // '>', '&' -> cast
+ return ParensResult.CAST;
+ } else if (peekToken(lookahead, IDENTIFIER, COMMA) ||
+ peekToken(lookahead, IDENTIFIER, RPAREN, ARROW) ||
+ peekToken(lookahead, ELLIPSIS)) {
+ // '>', Identifier, ',' -> explicit lambda
+ // '>', Identifier, ')', '->' -> explicit lambda
+ // '>', '...' -> explicit lambda
+ return ParensResult.EXPLICIT_LAMBDA;
+ }
+ //it looks like a type, but could still be (i) a cast to a generic type,
+ //(ii) an unbound method reference or (iii) an explicit lambda
+ type = true;
+ break;
+ } else if (depth < 0) {
+ //unbalanced '<', '>' - not a generic type
+ return ParensResult.PARENS;
+ }
+ break;
+ default:
+ //this includes EOF
+ return ParensResult.PARENS;
+ }
+ }
+ }
+
+ enum ParensResult {
+ CAST,
+ EXPLICIT_LAMBDA,
+ IMPLICIT_LAMBDA,
+ PARENS;
+ }
+
JCExpression lambdaExpressionOrStatement(JCVariableDecl firstParam, int pos) {
ListBuffer<JCVariableDecl> params = new ListBuffer<JCVariableDecl>();
params.append(firstParam);
@@ -3171,21 +3264,12 @@
/** Check that given tree is a legal expression statement.
*/
protected JCExpression checkExprStat(JCExpression t) {
- switch(t.getTag()) {
- case PREINC: case PREDEC:
- case POSTINC: case POSTDEC:
- case ASSIGN:
- case BITOR_ASG: case BITXOR_ASG: case BITAND_ASG:
- case SL_ASG: case SR_ASG: case USR_ASG:
- case PLUS_ASG: case MINUS_ASG:
- case MUL_ASG: case DIV_ASG: case MOD_ASG:
- case APPLY: case NEWCLASS:
- case ERRONEOUS:
- return t;
- default:
+ if (!TreeInfo.isExpressionStatement(t)) {
JCExpression ret = F.at(t.pos).Erroneous(List.<JCTree>of(t));
error(ret, "not.stmt");
return ret;
+ } else {
+ return t;
}
}
@@ -3395,6 +3479,12 @@
allowDefaultMethods = true;
}
}
+ void checkIntersectionTypesInCast() {
+ if (!allowIntersectionTypesInCast) {
+ log.error(token.pos, "intersection.types.in.cast.not.supported.in.source", source.name);
+ allowIntersectionTypesInCast = true;
+ }
+ }
/*
* a functional source tree and end position mappings
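
analyzeParens replaces the old speculative handling of '(' in term3: it looks ahead in the token stream and classifies the construct before any tree is built. The four ParensResult outcomes correspond to source shapes like these (ParensKinds is a made-up name; an intersection cast additionally needs the hidden allowIntersectionTypes option):

    class ParensKinds {
        Object asString(Object x) { return (String) x; }                       // CAST
        java.util.function.IntBinaryOperator f1 = (a, b) -> a + b;             // IMPLICIT_LAMBDA
        java.util.function.IntBinaryOperator f2 = (int a, int b) -> a + b;     // EXPLICIT_LAMBDA
        int scale(int a, int b, int c) { return (a + b) * c; }                 // PARENS
    }
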
--- a/langtools/src/share/classes/com/sun/tools/javac/resources/compiler.properties Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/resources/compiler.properties Mon Dec 17 08:30:06 2012 -0500
@@ -187,8 +187,9 @@
{0}
# 0: symbol, 1: symbol kind, 2: symbol
-compiler.misc.invalid.generic.desc.in.functional.intf=\
- invalid functional descriptor: method {0} in {1} {2} is generic
+compiler.misc.invalid.generic.lambda.target=\
+ invalid functional descriptor for lambda expression\n\
+ method {0} in {1} {2} is generic
# 0: symbol kind, 1: symbol
compiler.misc.incompatible.descs.in.functional.intf=\
@@ -206,6 +207,10 @@
compiler.misc.no.suitable.functional.intf.inst=\
cannot infer functional interface descriptor for {0}
+# 0: type
+compiler.misc.secondary.bound.must.be.marker.intf=\
+ secondary bound {0} must be a marker interface
+
# 0: symbol kind, 1: message segment
compiler.err.invalid.mref=\
invalid {0} reference; {1}
@@ -214,6 +219,12 @@
compiler.misc.invalid.mref=\
invalid {0} reference; {1}
+compiler.misc.static.mref.with.targs=\
+ parameterized qualifier on static method reference
+
+compiler.misc.static.bound.mref=\
+ static bound method reference
+
# 0: symbol
compiler.err.cant.assign.val.to.final.var=\
cannot assign a value to final variable {0}
@@ -2196,6 +2207,11 @@
default methods are not supported in -source {0}\n\
(use -source 8 or higher to enable default methods)
+# 0: string
+compiler.err.intersection.types.in.cast.not.supported.in.source=\
+ intersection types in cast are not supported in -source {0}\n\
+ (use -source 8 or higher to enable intersection types in cast)
+
########################################
# Diagnostics for verbose resolution
# used by Resolve (debug only)
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/JCTree.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/JCTree.java Mon Dec 17 08:30:06 2012 -0500
@@ -254,6 +254,10 @@
*/
TYPEUNION,
+ /** Intersection types, of type TypeIntersection
+ */
+ TYPEINTERSECTION,
+
/** Formal type parameters, of type TypeParameter.
*/
TYPEPARAMETER,
@@ -1829,8 +1833,6 @@
STATIC(ReferenceMode.INVOKE, false),
/** Expr # instMethod */
BOUND(ReferenceMode.INVOKE, false),
- /** Expr # staticMethod */
- STATIC_EVAL(ReferenceMode.INVOKE, false),
/** Inner # new */
IMPLICIT_INNER(ReferenceMode.NEW, false),
/** Toplevel # new */
@@ -2064,6 +2066,34 @@
}
/**
+ * An intersection type, T1 & T2 & ... Tn (used in cast expressions)
+ */
+ public static class JCTypeIntersection extends JCExpression implements IntersectionTypeTree {
+
+ public List<JCExpression> bounds;
+
+ protected JCTypeIntersection(List<JCExpression> bounds) {
+ this.bounds = bounds;
+ }
+ @Override
+ public void accept(Visitor v) { v.visitTypeIntersection(this); }
+
+ public Kind getKind() { return Kind.INTERSECTION_TYPE; }
+
+ public List<JCExpression> getBounds() {
+ return bounds;
+ }
+ @Override
+ public <R,D> R accept(TreeVisitor<R,D> v, D d) {
+ return v.visitIntersectionType(this, d);
+ }
+ @Override
+ public Tag getTag() {
+ return TYPEINTERSECTION;
+ }
+ }
+
+ /**
* A formal class parameter.
*/
public static class JCTypeParameter extends JCTree implements TypeParameterTree {
@@ -2385,6 +2415,7 @@
public void visitTypeArray(JCArrayTypeTree that) { visitTree(that); }
public void visitTypeApply(JCTypeApply that) { visitTree(that); }
public void visitTypeUnion(JCTypeUnion that) { visitTree(that); }
+ public void visitTypeIntersection(JCTypeIntersection that) { visitTree(that); }
public void visitTypeParameter(JCTypeParameter that) { visitTree(that); }
public void visitWildcard(JCWildcard that) { visitTree(that); }
public void visitTypeBoundKind(TypeBoundKind that) { visitTree(that); }
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/Pretty.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/Pretty.java Mon Dec 17 08:30:06 2012 -0500
@@ -1249,6 +1249,14 @@
}
}
+ public void visitTypeIntersection(JCTypeIntersection tree) {
+ try {
+ printExprs(tree.bounds, " & ");
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
public void visitTypeParameter(JCTypeParameter tree) {
try {
print(tree.name);
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/TreeCopier.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/TreeCopier.java Mon Dec 17 08:30:06 2012 -0500
@@ -358,6 +358,12 @@
return M.at(t.pos).TypeUnion(components);
}
+ public JCTree visitIntersectionType(IntersectionTypeTree node, P p) {
+ JCTypeIntersection t = (JCTypeIntersection) node;
+ List<JCExpression> bounds = copy(t.bounds, p);
+ return M.at(t.pos).TypeIntersection(bounds);
+ }
+
public JCTree visitArrayType(ArrayTypeTree node, P p) {
JCArrayTypeTree t = (JCArrayTypeTree) node;
JCExpression elemtype = copy(t.elemtype, p);
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/TreeInfo.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/TreeInfo.java Mon Dec 17 08:30:06 2012 -0500
@@ -267,6 +267,25 @@
return lambda.params.isEmpty() ||
lambda.params.head.vartype != null;
}
+
+ /** Return true if the tree corresponds to an expression statement */
+ public static boolean isExpressionStatement(JCExpression tree) {
+ switch(tree.getTag()) {
+ case PREINC: case PREDEC:
+ case POSTINC: case POSTDEC:
+ case ASSIGN:
+ case BITOR_ASG: case BITXOR_ASG: case BITAND_ASG:
+ case SL_ASG: case SR_ASG: case USR_ASG:
+ case PLUS_ASG: case MINUS_ASG:
+ case MUL_ASG: case DIV_ASG: case MOD_ASG:
+ case APPLY: case NEWCLASS:
+ case ERRONEOUS:
+ return true;
+ default:
+ return false;
+ }
+ }
+
/**
* Return true if the AST corresponds to a static select of the kind A.B
*/
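
TreeInfo.isExpressionStatement is now shared between checkExprStat and Attr's ExpressionLambdaReturnContext, which treats an expression lambda whose body is a statement expression as compatible with a void-returning descriptor. For example (VoidCompatible is a made-up name):

    import java.util.ArrayList;
    import java.util.List;

    class VoidCompatible {
        List<Integer> list = new ArrayList<Integer>();
        Runnable r = () -> list.add(42);   // add returns boolean, but the call is a statement
                                           // expression, so it is compatible with void run()
    }
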
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/TreeMaker.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/TreeMaker.java Mon Dec 17 08:30:06 2012 -0500
@@ -456,6 +456,12 @@
return tree;
}
+ public JCTypeIntersection TypeIntersection(List<JCExpression> components) {
+ JCTypeIntersection tree = new JCTypeIntersection(components);
+ tree.pos = pos;
+ return tree;
+ }
+
public JCTypeParameter TypeParameter(Name name, List<JCExpression> bounds) {
JCTypeParameter tree = new JCTypeParameter(name, bounds);
tree.pos = pos;
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/TreeScanner.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/TreeScanner.java Mon Dec 17 08:30:06 2012 -0500
@@ -286,6 +286,10 @@
scan(tree.alternatives);
}
+ public void visitTypeIntersection(JCTypeIntersection tree) {
+ scan(tree.bounds);
+ }
+
public void visitTypeParameter(JCTypeParameter tree) {
scan(tree.bounds);
}
--- a/langtools/src/share/classes/com/sun/tools/javac/tree/TreeTranslator.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/com/sun/tools/javac/tree/TreeTranslator.java Mon Dec 17 08:30:06 2012 -0500
@@ -379,6 +379,11 @@
result = tree;
}
+ public void visitTypeIntersection(JCTypeIntersection tree) {
+ tree.bounds = translate(tree.bounds);
+ result = tree;
+ }
+
public void visitTypeParameter(JCTypeParameter tree) {
tree.bounds = translate(tree.bounds);
result = tree;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/src/share/classes/javax/lang/model/type/IntersectionType.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package javax.lang.model.type;
+
+import java.util.List;
+
+/**
+ * Represents an intersection type.
+ *
+ * As of the {@link javax.lang.model.SourceVersion#RELEASE_8
+ * RELEASE_8} source version, intersection types can appear as the target type
+ * of a cast expression.
+ *
+ * @since 1.8
+ */
+public interface IntersectionType extends TypeMirror {
+
+ /**
+ * Return the bounds comprising this intersection type.
+ *
+ * @return the bounds of this intersection type.
+ */
+ List<? extends TypeMirror> getBounds();
+}
--- a/langtools/src/share/classes/javax/lang/model/type/TypeKind.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/javax/lang/model/type/TypeKind.java Mon Dec 17 08:30:06 2012 -0500
@@ -144,7 +144,14 @@
*
* @since 1.7
*/
- UNION;
+ UNION,
+
+ /**
+ * An intersection type.
+ *
+ * @since 1.8
+ */
+ INTERSECTION;
/**
* Returns {@code true} if this kind corresponds to a primitive
--- a/langtools/src/share/classes/javax/lang/model/type/TypeVisitor.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/javax/lang/model/type/TypeVisitor.java Mon Dec 17 08:30:06 2012 -0500
@@ -172,4 +172,14 @@
* @since 1.7
*/
R visitUnion(UnionType t, P p);
+
+ /**
+ * Visits an intersection type.
+ *
+ * @param t the type to visit
+ * @param p a visitor-specified parameter
+ * @return a visitor-specified result
+ * @since 1.8
+ */
+ R visitIntersection(IntersectionType t, P p);
}
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor6.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor6.java Mon Dec 17 08:30:06 2012 -0500
@@ -111,6 +111,20 @@
}
/**
+ * Visits an {@code IntersectionType} element by calling {@code
+ * visitUnknown}.
+ *
+ * @param t {@inheritDoc}
+ * @param p {@inheritDoc}
+ * @return the result of {@code visitUnknown}
+ *
+ * @since 1.8
+ */
+ public R visitIntersection(IntersectionType t, P p) {
+ return visitUnknown(t, p);
+ }
+
+ /**
* {@inheritDoc}
*
* <p> The default implementation of this method in {@code
--- a/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor8.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/javax/lang/model/util/AbstractTypeVisitor8.java Mon Dec 17 08:30:06 2012 -0500
@@ -66,4 +66,13 @@
protected AbstractTypeVisitor8() {
super();
}
+
+ /**
+ * Visits an {@code IntersectionType} in a manner defined by a subclass.
+ *
+ * @param t {@inheritDoc}
+ * @param p {@inheritDoc}
+ * @return the result of the visit as defined by a subclass
+ */
+ public abstract R visitIntersection(IntersectionType t, P p);
}
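
On the javax.lang.model side, intersection cast targets surface as TypeKind.INTERSECTION with an IntersectionType mirror. A processor could inspect the bounds roughly as follows (Bounds is a made-up helper; how the TypeMirror is obtained depends on the processing environment):

    import java.util.Collections;
    import java.util.List;
    import javax.lang.model.type.IntersectionType;
    import javax.lang.model.type.TypeKind;
    import javax.lang.model.type.TypeMirror;

    class Bounds {
        static List<? extends TypeMirror> boundsOf(TypeMirror t) {
            if (t.getKind() == TypeKind.INTERSECTION) {
                return ((IntersectionType) t).getBounds();   // the bounds T1, T2, ... of T1 & T2 & ...
            }
            return Collections.singletonList(t);             // any other type is its own single bound
        }
    }
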
--- a/langtools/src/share/classes/javax/tools/JavaCompiler.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/src/share/classes/javax/tools/JavaCompiler.java Mon Dec 17 08:30:06 2012 -0500
@@ -108,8 +108,8 @@
* example a recommended coding pattern:
*
* <pre>
- * Files[] files1 = ... ; // input for first compilation task
- * Files[] files2 = ... ; // input for second compilation task
+ * File[] files1 = ... ; // input for first compilation task
+ * File[] files2 = ... ; // input for second compilation task
*
* JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
* StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null);
@@ -165,7 +165,7 @@
* JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
* StandardJavaFileManager stdFileManager = compiler.getStandardFileManager(null, null, null);
* JavaFileManager fileManager = new ForwardingJavaFileManager(stdFileManager) {
- * public void flush() {
+ * public void flush() throws IOException {
* logger.entering(StandardJavaFileManager.class.getName(), "flush");
* super.flush();
* logger.exiting(StandardJavaFileManager.class.getName(), "flush");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/7144981/IgnoreIgnorableCharactersInInput.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,92 @@
+
+/*
+ * @test /nodynamiccopyright/
+ * @bug 7144981
+ * @summary javac should ignore ignorable characters in input
+ * @run main IgnoreIgnorableCharactersInInput
+ */
+
+import com.sun.source.util.JavacTask;
+import java.io.File;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.TreeSet;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.ToolProvider;
+
+public class IgnoreIgnorableCharactersInInput {
+
+ public static void main(String... args) throws Exception {
+ new IgnoreIgnorableCharactersInInput().run();
+ }
+
+ void run() throws Exception {
+ JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+ File classesDir = new File(System.getProperty("user.dir"), "classes");
+ classesDir.mkdirs();
+ JavaSource[] sources = new JavaSource[]{
+ new JavaSource("TestOneIgnorableChar", "AA\\u0000BB"),
+ new JavaSource("TestMultipleIgnorableChar", "AA\\u0000\\u0000\\u0000BB")};
+ JavacTask ct = (JavacTask)comp.getTask(null, null, null,
+ Arrays.asList("-d", classesDir.getPath()),
+ null, Arrays.asList(sources));
+ try {
+ if (!ct.call()) {
+ throw new AssertionError("Error thrown when compiling test cases");
+ }
+ } catch (Throwable ex) {
+ throw new AssertionError("Error thrown when compiling test cases");
+ }
+ check(classesDir,
+ "TestOneIgnorableChar.class",
+ "TestOneIgnorableChar$AABB.class",
+ "TestMultipleIgnorableChar.class",
+ "TestMultipleIgnorableChar$AABB.class");
+ if (errors > 0)
+ throw new AssertionError("There are some errors in the test; check the error output");
+ }
+
+ /**
+ * Check that a directory contains the expected files.
+ */
+ void check(File dir, String... paths) {
+ Set<String> found = new TreeSet<String>(Arrays.asList(dir.list()));
+ Set<String> expect = new TreeSet<String>(Arrays.asList(paths));
+ if (found.equals(expect))
+ return;
+ for (String f: found) {
+ if (!expect.contains(f))
+ error("Unexpected file found: " + f);
+ }
+ for (String e: expect) {
+ if (!found.contains(e))
+ error("Expected file not found: " + e);
+ }
+ }
+
+ int errors;
+
+ void error(String msg) {
+ System.err.println(msg);
+ errors++;
+ }
+
+ class JavaSource extends SimpleJavaFileObject {
+
+ String internalSource =
+ "public class #O {public class #I {} }";
+ public JavaSource(String outerClassName, String innerClassName) {
+ super(URI.create(outerClassName + ".java"), JavaFileObject.Kind.SOURCE);
+ internalSource =
+ internalSource.replace("#O", outerClassName).replace("#I", innerClassName);
+ }
+
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+ return internalSource;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/7153958/CPoolRefClassContainingInlinedCts.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 7153958
+ * @summary add constant pool reference to class containing inlined constants
+ * @compile pkg/ClassToBeStaticallyImported.java
+ * @run main CPoolRefClassContainingInlinedCts
+ */
+
+import com.sun.tools.classfile.ClassFile;
+import com.sun.tools.classfile.ConstantPool.CONSTANT_Class_info;
+import com.sun.tools.classfile.ConstantPool.CPInfo;
+import com.sun.tools.classfile.ConstantPoolException;
+import java.io.File;
+import java.io.IOException;
+
+import static pkg.ClassToBeStaticallyImported.staticField;
+
+public class CPoolRefClassContainingInlinedCts {
+
+ public static void main(String args[]) throws Exception {
+ new CPoolRefClassContainingInlinedCts().run();
+ }
+
+ void run() throws Exception {
+ checkReferences();
+ }
+
+ int numberOfReferencedClassesToBeChecked = 0;
+
+ void checkClassName(String className) {
+ switch (className) {
+ case "SimpleAssignClass" : case "BinaryExpClass":
+ case "UnaryExpClass" : case "CastClass":
+ case "ParensClass" : case "CondClass":
+ case "IfClass" : case "pkg/ClassToBeStaticallyImported":
+ numberOfReferencedClassesToBeChecked++;
+ }
+ }
+
+ void checkReferences() throws IOException, ConstantPoolException {
+ File testClasses = new File(System.getProperty("test.classes"));
+ File file = new File(testClasses,
+ CPoolRefClassContainingInlinedCts.class.getName() + ".class");
+ ClassFile classFile = ClassFile.read(file);
+ int i = 1;
+ CPInfo cpInfo;
+ while (i < classFile.constant_pool.size()) {
+ cpInfo = classFile.constant_pool.get(i);
+ if (cpInfo instanceof CONSTANT_Class_info) {
+ checkClassName(((CONSTANT_Class_info)cpInfo).getName());
+ }
+ i += cpInfo.size();
+ }
+ if (numberOfReferencedClassesToBeChecked != 8) {
+ throw new AssertionError("Class reference missing in the constant pool");
+ }
+ }
+
+ private int assign = SimpleAssignClass.x;
+ private int binary = BinaryExpClass.x + 1;
+ private int unary = -UnaryExpClass.x;
+ private int cast = (int)CastClass.x;
+ private int parens = (ParensClass.x);
+ private int cond = (CondClass.x == 1) ? 1 : 2;
+ private static int ifConstant;
+ private static int importStatic;
+ static {
+ if (IfClass.x == 1) {
+ ifConstant = 1;
+ } else {
+ ifConstant = 2;
+ }
+ }
+ static {
+ if (staticField == 1) {
+ importStatic = 1;
+ } else {
+ importStatic = 2;
+ }
+ }
+}
+
+class SimpleAssignClass {
+ public static final int x = 1;
+}
+
+class BinaryExpClass {
+ public static final int x = 1;
+}
+
+class UnaryExpClass {
+ public static final int x = 1;
+}
+
+class CastClass {
+ public static final int x = 1;
+}
+
+class ParensClass {
+ public static final int x = 1;
+}
+
+class CondClass {
+ public static final int x = 1;
+}
+
+class IfClass {
+ public static final int x = 1;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/7153958/pkg/ClassToBeStaticallyImported.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package pkg;
+
+public class ClassToBeStaticallyImported {
+ public static final int staticField = 1;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/IntersectionTypeCastTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8002099
+ * @summary Add support for intersection types in cast expression
+ */
+
+import com.sun.source.util.JavacTask;
+import com.sun.tools.javac.util.List;
+import com.sun.tools.javac.util.ListBuffer;
+import java.net.URI;
+import java.util.Arrays;
+import javax.tools.Diagnostic;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+public class IntersectionTypeCastTest {
+
+ static int checkCount = 0;
+
+ interface Type {
+ boolean subtypeOf(Type that);
+ String asString();
+ boolean isClass();
+ boolean isInterface();
+ }
+
+ enum InterfaceKind implements Type {
+ A("interface A { }\n", "A", null),
+ B("interface B { }\n", "B", null),
+ C("interface C extends A { }\n", "C", A);
+
+ String declStr;
+ String typeStr;
+ InterfaceKind superInterface;
+
+ InterfaceKind(String declStr, String typeStr, InterfaceKind superInterface) {
+ this.declStr = declStr;
+ this.typeStr = typeStr;
+ this.superInterface = superInterface;
+ }
+
+ @Override
+ public boolean subtypeOf(Type that) {
+ return this == that || superInterface == that || that == ClassKind.OBJECT;
+ }
+
+ @Override
+ public String asString() {
+ return typeStr;
+ }
+
+ @Override
+ public boolean isClass() {
+ return false;
+ }
+
+ @Override
+ public boolean isInterface() {
+ return true;
+ }
+ }
+
+ enum ClassKind implements Type {
+ OBJECT(null, "Object"),
+ CA("#M class CA implements A { }\n", "CA", InterfaceKind.A),
+ CB("#M class CB implements B { }\n", "CB", InterfaceKind.B),
+ CAB("#M class CAB implements A, B { }\n", "CAB", InterfaceKind.A, InterfaceKind.B),
+ CC("#M class CC implements C { }\n", "CC", InterfaceKind.C, InterfaceKind.A),
+ CCA("#M class CCA implements C, A { }\n", "CCA", InterfaceKind.C, InterfaceKind.A),
+ CCB("#M class CCB implements C, B { }\n", "CCB", InterfaceKind.C, InterfaceKind.A, InterfaceKind.B),
+ CCAB("#M class CCAB implements C, A, B { }\n", "CCAB", InterfaceKind.C, InterfaceKind.A, InterfaceKind.B);
+
+ String declTemplate;
+ String typeStr;
+ List<InterfaceKind> superInterfaces;
+
+ ClassKind(String declTemplate, String typeStr, InterfaceKind... superInterfaces) {
+ this.declTemplate = declTemplate;
+ this.typeStr = typeStr;
+ this.superInterfaces = List.from(superInterfaces);
+ }
+
+ String getDecl(ModifierKind mod) {
+ return declTemplate != null ?
+ declTemplate.replaceAll("#M", mod.modStr) :
+ "";
+ }
+
+ @Override
+ public boolean subtypeOf(Type that) {
+ return this == that || superInterfaces.contains(that) || that == OBJECT;
+ }
+
+ @Override
+ public String asString() {
+ return typeStr;
+ }
+
+ @Override
+ public boolean isClass() {
+ return true;
+ }
+
+ @Override
+ public boolean isInterface() {
+ return false;
+ }
+ }
+
+ enum ModifierKind {
+ NONE(""),
+ FINAL("final");
+
+ String modStr;
+
+ ModifierKind(String modStr) {
+ this.modStr = modStr;
+ }
+ }
+
+ enum CastKind {
+ CLASS("(#C)", 0),
+ INTERFACE("(#I0)", 1),
+ INTERSECTION2("(#C & #I0)", 1),
+ INTERSECTION3("(#C & #I0 & #I1)", 2);
+ //INTERSECTION4("(#C & #I0 & #I1 & #I2)", 3);
+
+ String castTemplate;
+ int interfaceBounds;
+
+ CastKind(String castTemplate, int interfaceBounds) {
+ this.castTemplate = castTemplate;
+ this.interfaceBounds = interfaceBounds;
+ }
+ }
+
+ static class CastInfo {
+ CastKind kind;
+ Type[] types;
+
+ CastInfo(CastKind kind, Type... types) {
+ this.kind = kind;
+ this.types = types;
+ }
+
+ String getCast() {
+ String temp = kind.castTemplate.replaceAll("#C", types[0].asString());
+ for (int i = 0; i < kind.interfaceBounds ; i++) {
+ temp = temp.replace(String.format("#I%d", i), types[i + 1].asString());
+ }
+ return temp;
+ }
+
+ boolean hasDuplicateTypes() {
+ for (int i = 0 ; i < types.length ; i++) {
+ for (int j = 0 ; j < types.length ; j++) {
+ if (i != j && types[i] == types[j]) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ boolean compatibleWith(ModifierKind mod, CastInfo that) {
+ for (Type t1 : types) {
+ for (Type t2 : that.types) {
+ boolean compat =
+ t1.subtypeOf(t2) ||
+ t2.subtypeOf(t1) ||
+ (t1.isInterface() && t2.isInterface()) || //side-cast (1)
+ (mod == ModifierKind.NONE && (t1.isInterface() != t2.isInterface())); //side-cast (2)
+ if (!compat) return false;
+ }
+ }
+ return true;
+ }
+ }
+
+ public static void main(String... args) throws Exception {
+ //create default shared JavaCompiler - reused across multiple compilations
+ JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+ StandardJavaFileManager fm = comp.getStandardFileManager(null, null, null);
+
+ for (ModifierKind mod : ModifierKind.values()) {
+ for (CastInfo cast1 : allCastInfo()) {
+ for (CastInfo cast2 : allCastInfo()) {
+ new IntersectionTypeCastTest(mod, cast1, cast2).run(comp, fm);
+ }
+ }
+ }
+ System.out.println("Total check executed: " + checkCount);
+ }
+
+ static List<CastInfo> allCastInfo() {
+ ListBuffer<CastInfo> buf = ListBuffer.lb();
+ for (CastKind kind : CastKind.values()) {
+ for (ClassKind clazz : ClassKind.values()) {
+ if (kind == CastKind.INTERFACE && clazz != ClassKind.OBJECT) {
+ continue;
+ } else if (kind.interfaceBounds == 0) {
+ buf.append(new CastInfo(kind, clazz));
+ continue;
+ } else {
+ for (InterfaceKind intf1 : InterfaceKind.values()) {
+ if (kind.interfaceBounds == 1) {
+ buf.append(new CastInfo(kind, clazz, intf1));
+ continue;
+ } else {
+ for (InterfaceKind intf2 : InterfaceKind.values()) {
+ if (kind.interfaceBounds == 2) {
+ buf.append(new CastInfo(kind, clazz, intf1, intf2));
+ continue;
+ } else {
+ for (InterfaceKind intf3 : InterfaceKind.values()) {
+ buf.append(new CastInfo(kind, clazz, intf1, intf2, intf3));
+ continue;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return buf.toList();
+ }
+
+ ModifierKind mod;
+ CastInfo cast1, cast2;
+ JavaSource source;
+ DiagnosticChecker diagChecker;
+
+ IntersectionTypeCastTest(ModifierKind mod, CastInfo cast1, CastInfo cast2) {
+ this.mod = mod;
+ this.cast1 = cast1;
+ this.cast2 = cast2;
+ this.source = new JavaSource();
+ this.diagChecker = new DiagnosticChecker();
+ }
+
+ class JavaSource extends SimpleJavaFileObject {
+
+ String bodyTemplate = "class Test {\n" +
+ " void test() {\n" +
+ " Object o = #C1#C2null;\n" +
+ " } }";
+
+ String source = "";
+
+ public JavaSource() {
+ super(URI.create("myfo:/Test.java"), JavaFileObject.Kind.SOURCE);
+ for (ClassKind ck : ClassKind.values()) {
+ source += ck.getDecl(mod);
+ }
+ for (InterfaceKind ik : InterfaceKind.values()) {
+ source += ik.declStr;
+ }
+ source += bodyTemplate.replaceAll("#C1", cast1.getCast()).replaceAll("#C2", cast2.getCast());
+ }
+
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+ return source;
+ }
+ }
+
+ void run(JavaCompiler tool, StandardJavaFileManager fm) throws Exception {
+ JavacTask ct = (JavacTask)tool.getTask(null, fm, diagChecker,
+ Arrays.asList("-XDallowIntersectionTypes"), null, Arrays.asList(source));
+ try {
+ ct.analyze();
+ } catch (Throwable ex) {
+ throw new AssertionError("Error thrown when compiling the following code:\n" + source.getCharContent(true));
+ }
+ check();
+ }
+
+ void check() {
+ checkCount++;
+
+ boolean errorExpected = cast1.hasDuplicateTypes() || cast2.hasDuplicateTypes();
+
+ errorExpected |= !cast2.compatibleWith(mod, cast1);
+
+ if (errorExpected != diagChecker.errorFound) {
+ throw new Error("invalid diagnostics for source:\n" +
+ source.getCharContent(true) +
+ "\nFound error: " + diagChecker.errorFound +
+ "\nExpected error: " + errorExpected);
+ }
+ }
+
+ static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
+
+ boolean errorFound;
+
+ public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
+ if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
+ errorFound = true;
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/IntersectionTypeParserTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8002099
+ * @summary Add support for intersection types in cast expression
+ */
+
+import com.sun.source.util.JavacTask;
+import java.net.URI;
+import java.util.Arrays;
+import javax.tools.Diagnostic;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+public class IntersectionTypeParserTest {
+
+ static int checkCount = 0;
+
+ enum TypeKind {
+ SIMPLE("A"),
+ GENERIC("A<X>"),
+ WILDCARD("A<? super X, ? extends Y>");
+
+ String typeStr;
+
+ TypeKind(String typeStr) {
+ this.typeStr = typeStr;
+ }
+ }
+
+ enum ArrayKind {
+ NONE(""),
+ SINGLE("[]"),
+ DOUBLE("[][]");
+
+ String arrStr;
+
+ ArrayKind(String arrStr) {
+ this.arrStr = arrStr;
+ }
+ }
+
+ static class Type {
+ TypeKind tk;
+ ArrayKind ak;
+
+ Type(TypeKind tk, ArrayKind ak) {
+ this.tk = tk;
+ this.ak = ak;
+ }
+
+ String asString() {
+ return tk.typeStr + ak.arrStr;
+ }
+ }
+
+ enum CastKind {
+ ONE("(#T0)", 1),
+ TWO("(#T0 & T1)", 2),
+ THREE("(#T0 & #T1 & #T2)", 3);
+
+ String castTemplate;
+ int nBounds;
+
+ CastKind(String castTemplate, int nBounds) {
+ this.castTemplate = castTemplate;
+ this.nBounds = nBounds;
+ }
+
+ String asString(Type... types) {
+ String res = castTemplate;
+ for (int i = 0; i < nBounds ; i++) {
+ res = res.replaceAll(String.format("#T%d", i), types[i].asString());
+ }
+ return res;
+ }
+ }
+
+ public static void main(String... args) throws Exception {
+ //create default shared JavaCompiler - reused across multiple compilations
+ JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+ StandardJavaFileManager fm = comp.getStandardFileManager(null, null, null);
+
+ for (CastKind ck : CastKind.values()) {
+ for (TypeKind t1 : TypeKind.values()) {
+ for (ArrayKind ak1 : ArrayKind.values()) {
+ Type typ1 = new Type(t1, ak1);
+ if (ck.nBounds == 1) {
+ new IntersectionTypeParserTest(ck, typ1).run(comp, fm);
+ continue;
+ }
+ for (TypeKind t2 : TypeKind.values()) {
+ for (ArrayKind ak2 : ArrayKind.values()) {
+ Type typ2 = new Type(t2, ak2);
+ if (ck.nBounds == 2) {
+ new IntersectionTypeParserTest(ck, typ1, typ2).run(comp, fm);
+ continue;
+ }
+ for (TypeKind t3 : TypeKind.values()) {
+ for (ArrayKind ak3 : ArrayKind.values()) {
+ Type typ3 = new Type(t3, ak3);
+ new IntersectionTypeParserTest(ck, typ1, typ2, typ3).run(comp, fm);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ System.out.println("Total check executed: " + checkCount);
+ }
+
+ CastKind ck;
+ Type[] types;
+ JavaSource source;
+ DiagnosticChecker diagChecker;
+
+ IntersectionTypeParserTest(CastKind ck, Type... types) {
+ this.ck = ck;
+ this.types = types;
+ this.source = new JavaSource();
+ this.diagChecker = new DiagnosticChecker();
+ }
+
+ class JavaSource extends SimpleJavaFileObject {
+
+ String bodyTemplate = "class Test {\n" +
+ " void test() {\n" +
+ " Object o = #Cnull;\n" +
+ " } }";
+
+ String source = "";
+
+ public JavaSource() {
+ super(URI.create("myfo:/Test.java"), JavaFileObject.Kind.SOURCE);
+ source += bodyTemplate.replaceAll("#C", ck.asString(types));
+ }
+
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+ return source;
+ }
+ }
+
+ void run(JavaCompiler tool, StandardJavaFileManager fm) throws Exception {
+ checkCount++;
+ JavacTask ct = (JavacTask)tool.getTask(null, fm, diagChecker,
+ Arrays.asList("-XDallowIntersectionTypes"), null, Arrays.asList(source));
+ ct.parse();
+ if (diagChecker.errorFound) {
+ throw new Error("Unexpected parser error for source:\n" +
+ source.getCharContent(true));
+ }
+ }
+
+ static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
+
+ boolean errorFound;
+
+ public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
+ if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
+ errorFound = true;
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/model/Check.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Annotation used by ModelChecker to mark the class whose model is to be checked
+ */
+@interface Check {}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/model/IntersectionTypeInfo.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Used by ModelChecker to validate the modeling information of an intersection type.
+ */
+@interface IntersectionTypeInfo {
+ String[] value();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/model/Member.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import javax.lang.model.element.ElementKind;
+
+/**
+ * Annotation used by ModelChecker to mark a member that is to be checked
+ */
+@interface Member {
+ ElementKind value();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/model/Model01.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8002099
+ * @summary Add support for intersection types in cast expression
+ * @library ../../../lib
+ * @build JavacTestingAbstractProcessor ModelChecker
+ * @compile -XDallowIntersectionTypes -processor ModelChecker Model01.java
+ */
+
+import javax.lang.model.element.ElementKind;
+
+@Check
+class Test {
+
+ interface A {
+ @Member(ElementKind.METHOD)
+ public void m1();
+ }
+
+ interface B {
+ @Member(ElementKind.METHOD)
+ public void m2();
+ }
+
+ void test(){
+ @IntersectionTypeInfo({"java.lang.Object", "Test.A", "Test.B"})
+ Object o = (A & B)null;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/cast/intersection/model/ModelChecker.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.sun.source.tree.ExpressionTree;
+import com.sun.source.tree.Tree;
+import com.sun.source.tree.TypeCastTree;
+import com.sun.source.tree.VariableTree;
+import com.sun.source.util.TreePathScanner;
+import com.sun.source.util.Trees;
+import com.sun.source.util.TreePath;
+import com.sun.tools.javac.tree.JCTree.JCExpression;
+
+import java.util.Set;
+
+import javax.annotation.processing.RoundEnvironment;
+import javax.annotation.processing.SupportedAnnotationTypes;
+import javax.lang.model.element.Element;
+import javax.lang.model.element.TypeElement;
+import javax.lang.model.type.TypeMirror;
+import javax.lang.model.type.TypeKind;
+import javax.lang.model.type.IntersectionType;
+import javax.lang.model.type.UnknownTypeException;
+import javax.lang.model.util.SimpleTypeVisitor6;
+import javax.lang.model.util.SimpleTypeVisitor7;
+
+@SupportedAnnotationTypes("Check")
+public class ModelChecker extends JavacTestingAbstractProcessor {
+
+ @Override
+ public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
+ if (roundEnv.processingOver())
+ return true;
+
+ Trees trees = Trees.instance(processingEnv);
+
+ TypeElement testAnno = elements.getTypeElement("Check");
+ for (Element elem: roundEnv.getElementsAnnotatedWith(testAnno)) {
+ TreePath p = trees.getPath(elem);
+ new IntersectionCastTester(trees).scan(p, null);
+ }
+ return true;
+ }
+
+ class IntersectionCastTester extends TreePathScanner<Void, Void> {
+ Trees trees;
+
+ public IntersectionCastTester(Trees trees) {
+ super();
+ this.trees = trees;
+ }
+
+ @Override
+ public Void visitVariable(VariableTree node, Void p) {
+
+ TreePath varPath = new TreePath(getCurrentPath(), node);
+ Element v = trees.getElement(varPath);
+
+ IntersectionTypeInfo it = v.getAnnotation(IntersectionTypeInfo.class);
+ assertTrue(it != null, "IntersectionType annotation must be present");
+
+ ExpressionTree varInit = node.getInitializer();
+ assertTrue(varInit != null && varInit.getKind() == Tree.Kind.TYPE_CAST,
+ "variable must have be initialized to an expression containing an intersection type cast");
+
+ TypeMirror t = ((JCExpression)((TypeCastTree)varInit).getType()).type;
+
+ validateIntersectionTypeInfo(t, it);
+
+ for (Element e2 : types.asElement(t).getEnclosedElements()) {
+ assertTrue(false, "an intersection type has no declared members");
+ }
+
+ for (Element e2 : elements.getAllMembers((TypeElement)types.asElement(t))) {
+ Member m = e2.getAnnotation(Member.class);
+ if (m != null) {
+ assertTrue(e2.getKind() == m.value(), "Expected " + m.value() + " - found " + e2.getKind());
+ }
+ }
+
+ assertTrue(assertionCount == 10, "Expected 10 assertions - found " + assertionCount);
+ return super.visitVariable(node, p);
+ }
+ }
+
+ private void validateIntersectionTypeInfo(TypeMirror expectedIntersectionType, IntersectionTypeInfo it) {
+
+ assertTrue(expectedIntersectionType.getKind() == TypeKind.INTERSECTION, "INTERSECTION kind expected");
+
+ try {
+ new SimpleTypeVisitor6<Void, Void>(){}.visit(expectedIntersectionType);
+ throw new RuntimeException("Expected UnknownTypeException not thrown.");
+ } catch (UnknownTypeException ute) {
+ ; // Expected
+ }
+
+ try {
+ new SimpleTypeVisitor7<Void, Void>(){}.visit(expectedIntersectionType);
+ throw new RuntimeException("Expected UnknownTypeException not thrown.");
+ } catch (UnknownTypeException ute) {
+ ; // Expected
+ }
+
+ IntersectionType intersectionType = new SimpleTypeVisitor7<IntersectionType, Void>(){
+ @Override
+ protected IntersectionType defaultAction(TypeMirror e, Void p) {return null;}
+
+ @Override
+ public IntersectionType visitIntersection(IntersectionType t, Void p) {return t;}
+ }.visit(expectedIntersectionType);
+ assertTrue(intersectionType != null, "Must get a non-null intersection type.");
+
+ assertTrue(it.value().length == intersectionType.getBounds().size(), "Cardinalities do not match");
+
+ String[] typeNames = it.value();
+ for(int i = 0; i < typeNames.length; i++) {
+ TypeMirror typeFromAnnotation = nameToType(typeNames[i]);
+ assertTrue(types.isSameType(typeFromAnnotation, intersectionType.getBounds().get(i)),
+ "Types were not equal.");
+ }
+ }
+
+ private TypeMirror nameToType(String name) {
+ return elements.getTypeElement(name).asType();
+ }
+
+ private static void assertTrue(boolean cond, String msg) {
+ assertionCount++;
+ if (!cond)
+ throw new AssertionError(msg);
+ }
+
+ static int assertionCount = 0;
+}
--- a/langtools/test/tools/javac/defaultMethodExecution/DefaultMethodRegressionTests.java Mon Dec 17 08:28:27 2012 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * @test
- * @bug 8003639
- * @summary convert lambda testng tests to jtreg and add them
- * @run testng DefaultMethodRegressionTests
- */
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import org.testng.annotations.Test;
-
-import static org.testng.Assert.*;
-
-/**
- * This set of classes/interfaces (K/I/C) is specially designed to expose a
- * bug in the JVM where it did not find some overloaded methods in some
- * specific situations. (fixed by hotspot changeset ffb9316fd9ed)
- */
-interface K {
- int bbb(Long l);
-}
-
-interface I extends K {
- default void aaa() {}
- default void aab() {}
- default void aac() {}
-
- default int bbb(Integer i) { return 22; }
- default int bbb(Float f) { return 33; }
- default int bbb(Long l) { return 44; }
- default int bbb(Double d) { return 55; }
- default int bbb(String s) { return 66; }
-
- default void caa() {}
- default void cab() {}
- default void cac() {}
-}
-
-class C implements I {}
-
-public class DefaultMethodRegressionTests {
-
- @Test(groups = "vm")
- public void testLostOverloadedMethod() {
- C c = new C();
- assertEquals(c.bbb(new Integer(1)), 22);
- assertEquals(c.bbb(new Float(1.1)), 33);
- assertEquals(c.bbb(new Long(1L)), 44);
- assertEquals(c.bbb(new Double(0.01)), 55);
- assertEquals(c.bbb(new String("")), 66);
- }
-
- // Test to ensure that the inference verifier accepts older classfiles
- // with classes that implement interfaces with defaults.
- @Test(groups = "vm")
- public void testInferenceVerifier() {
- // interface I { int m() default { return 99; } }
- byte I_bytes[] = {
- (byte)0xca, (byte)0xfe, (byte)0xba, (byte)0xbe, 0x00, 0x00, 0x00, 0x33,
- 0x00, 0x08, 0x07, 0x00, 0x06, 0x07, 0x00, 0x07,
- 0x01, 0x00, 0x03, 0x66, 0x6f, 0x6f, 0x01, 0x00,
- 0x03, 0x28, 0x29, 0x49, 0x01, 0x00, 0x04, 0x43,
- 0x6f, 0x64, 0x65, 0x01, 0x00, 0x01, 0x49, 0x01,
- 0x00, 0x10, 0x6a, 0x61, 0x76, 0x61, 0x2f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x06, 0x00, 0x00, 0x01, 0x00, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x01,
- 0x00, 0x03, 0x00, 0x04, 0x00, 0x01, 0x00, 0x05,
- 0x00, 0x00, 0x00, 0x0f, 0x00, 0x01, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x03, 0x10, 0x63, (byte)0xac, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00
- };
- // public class C implements I {} /* -target 1.5 */
- byte C_bytes[] = {
- (byte)0xca, (byte)0xfe, (byte)0xba, (byte)0xbe, 0x00, 0x00, 0x00, 0x31,
- 0x00, 0x0c, 0x0a, 0x00, 0x03, 0x00, 0x08, 0x07,
- 0x00, 0x09, 0x07, 0x00, 0x0a, 0x07, 0x00, 0x0b,
- 0x01, 0x00, 0x06, 0x3c, 0x69, 0x6e, 0x69, 0x74,
- 0x3e, 0x01, 0x00, 0x03, 0x28, 0x29, 0x56, 0x01,
- 0x00, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x0c, 0x00,
- 0x05, 0x00, 0x06, 0x01, 0x00, 0x01, 0x43, 0x01,
- 0x00, 0x10, 0x6a, 0x61, 0x76, 0x61, 0x2f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x01, 0x00, 0x01, 0x49, 0x00, 0x21,
- 0x00, 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x05,
- 0x00, 0x06, 0x00, 0x01, 0x00, 0x07, 0x00, 0x00,
- 0x00, 0x11, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x05, 0x2a, (byte)0xb7, 0x00, 0x01, (byte)0xb1, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00
- };
-
- ClassLoader cl = new ClassLoader() {
- protected Class<?> findClass(String name) {
- if (name.equals("I")) {
- return defineClass("I", I_bytes, 0, I_bytes.length);
- } else if (name.equals("C")) {
- return defineClass("C", C_bytes, 0, C_bytes.length);
- } else {
- return null;
- }
- }
- };
- try {
- Class.forName("C", true, cl);
- } catch (Exception e) {
- // unmodified verifier will throw VerifyError
- fail("No exception should be thrown");
- }
- }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/defaultMethods/defaultMethodExecution/DefaultMethodRegressionTests.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @ignore 8004360
+ * @bug 8003639
+ * @summary convert lambda testng tests to jtreg and add them
+ * @run testng DefaultMethodRegressionTests
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.*;
+
+/**
+ * This set of classes/interfaces (K/I/C) is specially designed to expose a
+ * bug in the JVM where it did not find some overloaded methods in some
+ * specific situations. (fixed by hotspot changeset ffb9316fd9ed)
+ */
+interface K {
+ int bbb(Long l);
+}
+
+interface I extends K {
+ default void aaa() {}
+ default void aab() {}
+ default void aac() {}
+
+ default int bbb(Integer i) { return 22; }
+ default int bbb(Float f) { return 33; }
+ default int bbb(Long l) { return 44; }
+ default int bbb(Double d) { return 55; }
+ default int bbb(String s) { return 66; }
+
+ default void caa() {}
+ default void cab() {}
+ default void cac() {}
+}
+
+class C implements I {}
+
+public class DefaultMethodRegressionTests {
+
+ @Test(groups = "vm")
+ public void testLostOverloadedMethod() {
+ C c = new C();
+ assertEquals(c.bbb(new Integer(1)), 22);
+ assertEquals(c.bbb(new Float(1.1)), 33);
+ assertEquals(c.bbb(new Long(1L)), 44);
+ assertEquals(c.bbb(new Double(0.01)), 55);
+ assertEquals(c.bbb(new String("")), 66);
+ }
+
+ // Test to ensure that the inference verifier accepts older classfiles
+ // with classes that implement interfaces with defaults.
+ @Test(groups = "vm")
+ public void testInferenceVerifier() {
+ // interface I { int m() default { return 99; } }
+ byte I_bytes[] = {
+ (byte)0xca, (byte)0xfe, (byte)0xba, (byte)0xbe, 0x00, 0x00, 0x00, 0x33,
+ 0x00, 0x08, 0x07, 0x00, 0x06, 0x07, 0x00, 0x07,
+ 0x01, 0x00, 0x03, 0x66, 0x6f, 0x6f, 0x01, 0x00,
+ 0x03, 0x28, 0x29, 0x49, 0x01, 0x00, 0x04, 0x43,
+ 0x6f, 0x64, 0x65, 0x01, 0x00, 0x01, 0x49, 0x01,
+ 0x00, 0x10, 0x6a, 0x61, 0x76, 0x61, 0x2f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x06, 0x00, 0x00, 0x01, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x01,
+ 0x00, 0x03, 0x00, 0x04, 0x00, 0x01, 0x00, 0x05,
+ 0x00, 0x00, 0x00, 0x0f, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x03, 0x10, 0x63, (byte)0xac, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+ // public class C implements I {} /* -target 1.5 */
+ byte C_bytes[] = {
+ (byte)0xca, (byte)0xfe, (byte)0xba, (byte)0xbe, 0x00, 0x00, 0x00, 0x31,
+ 0x00, 0x0c, 0x0a, 0x00, 0x03, 0x00, 0x08, 0x07,
+ 0x00, 0x09, 0x07, 0x00, 0x0a, 0x07, 0x00, 0x0b,
+ 0x01, 0x00, 0x06, 0x3c, 0x69, 0x6e, 0x69, 0x74,
+ 0x3e, 0x01, 0x00, 0x03, 0x28, 0x29, 0x56, 0x01,
+ 0x00, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x0c, 0x00,
+ 0x05, 0x00, 0x06, 0x01, 0x00, 0x01, 0x43, 0x01,
+ 0x00, 0x10, 0x6a, 0x61, 0x76, 0x61, 0x2f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x01, 0x00, 0x01, 0x49, 0x00, 0x21,
+ 0x00, 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x05,
+ 0x00, 0x06, 0x00, 0x01, 0x00, 0x07, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x05, 0x2a, (byte)0xb7, 0x00, 0x01, (byte)0xb1, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+ ClassLoader cl = new ClassLoader() {
+ protected Class<?> findClass(String name) {
+ if (name.equals("I")) {
+ return defineClass("I", I_bytes, 0, I_bytes.length);
+ } else if (name.equals("C")) {
+ return defineClass("C", C_bytes, 0, C_bytes.length);
+ } else {
+ return null;
+ }
+ }
+ };
+ try {
+ Class.forName("C", true, cl);
+ } catch (Exception e) {
+ // unmodified verifier will throw VerifyError
+ fail("No exception should be thrown");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/diags/examples/IntersectionTypesInCastNotSupported.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.intersection.types.in.cast.not.supported.in.source
+// options: -source 7 -Xlint:-options
+
+interface IntersectionTypesInCastNotSupported {
+ Object o = (A & B)null;
+}
--- a/langtools/test/tools/javac/diags/examples/InvalidGenericDescInFunctionalInterface.java Mon Dec 17 08:28:27 2012 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-// key: compiler.err.prob.found.req
-// key: compiler.misc.invalid.generic.desc.in.functional.intf
-
-class InvalidGenericDescInFunctionalIntf {
-
- interface SAM {
- <Z> void m();
- }
-
- SAM s = x-> { };
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/diags/examples/InvalidGenericLambdaTarget.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.prob.found.req
+// key: compiler.misc.invalid.generic.lambda.target
+
+class InvalidGenericLambdaTarget {
+
+ interface SAM {
+ <Z> void m();
+ }
+
+ SAM s = x-> { };
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/diags/examples/SecondaryBoundMustBeMarkerIntf.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.prob.found.req
+// key: compiler.misc.secondary.bound.must.be.marker.intf
+// options: -XDallowIntersectionTypes
+
+class SecondaryBoundMustBeMarkerInterface {
+ Runnable r = (Runnable & Comparable<?>)()->{};
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/diags/examples/StaticBoundMref.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.invalid.mref
+// key: compiler.misc.static.bound.mref
+
+class StaticBoundMref {
+
+ Runnable r = new StaticBoundMref()::m;
+
+ static void m() { }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/diags/examples/StaticMrefWithTargs.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// key: compiler.err.invalid.mref
+// key: compiler.misc.static.mref.with.targs
+
+class StaticMrefWithTargs<X> {
+
+ Runnable r = StaticMrefWithTargs<String>::m;
+
+ static void m() { }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/FunctionalInterfaceConversionTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8003280 8004102
+ * @summary Add lambda tests
+ * perform several automated checks in lambda conversion, esp. around accessibility
+ * @author Maurizio Cimadamore
+ * @run main FunctionalInterfaceConversionTest
+ */
+
+import com.sun.source.util.JavacTask;
+import java.net.URI;
+import java.util.Arrays;
+import javax.tools.Diagnostic;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+public class FunctionalInterfaceConversionTest {
+
+ enum PackageKind {
+ NO_PKG(""),
+ PKG_A("a");
+
+ String pkg;
+
+ PackageKind(String pkg) {
+ this.pkg = pkg;
+ }
+
+ String getPkgDecl() {
+ return this == NO_PKG ?
+ "" :
+ "package " + pkg + ";";
+ }
+
+ String getImportStat() {
+ return this == NO_PKG ?
+ "" :
+ "import " + pkg + ".*;";
+ }
+ }
+
+ enum SamKind {
+ CLASS("public class Sam { }"),
+ ABSTRACT_CLASS("public abstract class Sam { }"),
+ ANNOTATION("public @interface Sam { }"),
+ ENUM("public enum Sam { }"),
+ INTERFACE("public interface Sam { \n #METH; \n }");
+
+ String sam_str;
+
+ SamKind(String sam_str) {
+ this.sam_str = sam_str;
+ }
+
+ String getSam(String methStr) {
+ return sam_str.replaceAll("#METH", methStr);
+ }
+ }
+
+ enum ModifierKind {
+ PUBLIC("public"),
+ PACKAGE("");
+
+ String modifier_str;
+
+ ModifierKind(String modifier_str) {
+ this.modifier_str = modifier_str;
+ }
+
+ boolean stricterThan(ModifierKind that) {
+ return this.ordinal() > that.ordinal();
+ }
+ }
+
+ enum TypeKind {
+ EXCEPTION("Exception"),
+ PKG_CLASS("PackageClass");
+
+ String typeStr;
+
+ private TypeKind(String typeStr) {
+ this.typeStr = typeStr;
+ }
+ }
+
+ enum ExprKind {
+ LAMBDA("x -> null"),
+ MREF("this::m");
+
+ String exprStr;
+
+ private ExprKind(String exprStr) {
+ this.exprStr = exprStr;
+ }
+ }
+
+ enum MethodKind {
+ NONE(""),
+ NON_GENERIC("public abstract #R m(#ARG s) throws #T;"),
+ GENERIC("public abstract <X> #R m(#ARG s) throws #T;");
+
+ String methodTemplate;
+
+ private MethodKind(String methodTemplate) {
+ this.methodTemplate = methodTemplate;
+ }
+
+ String getMethod(TypeKind retType, TypeKind argType, TypeKind thrownType) {
+ return methodTemplate.replaceAll("#R", retType.typeStr).
+ replaceAll("#ARG", argType.typeStr).
+ replaceAll("#T", thrownType.typeStr);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ final JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+ StandardJavaFileManager fm = comp.getStandardFileManager(null, null, null);
+ for (PackageKind samPkg : PackageKind.values()) {
+ for (ModifierKind modKind : ModifierKind.values()) {
+ for (SamKind samKind : SamKind.values()) {
+ for (MethodKind samMeth : MethodKind.values()) {
+ for (MethodKind clientMeth : MethodKind.values()) {
+ for (TypeKind retType : TypeKind.values()) {
+ for (TypeKind argType : TypeKind.values()) {
+ for (TypeKind thrownType : TypeKind.values()) {
+ for (ExprKind exprKind : ExprKind.values()) {
+ new FunctionalInterfaceConversionTest(samPkg, modKind, samKind,
+ samMeth, clientMeth, retType, argType, thrownType, exprKind).test(comp, fm);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ PackageKind samPkg;
+ ModifierKind modKind;
+ SamKind samKind;
+ MethodKind samMeth;
+ MethodKind clientMeth;
+ TypeKind retType;
+ TypeKind argType;
+ TypeKind thrownType;
+ ExprKind exprKind;
+ DiagnosticChecker dc;
+
+ SourceFile samSourceFile = new SourceFile("Sam.java", "#P \n #C") {
+ public String toString() {
+ return template.replaceAll("#P", samPkg.getPkgDecl()).
+ replaceAll("#C", samKind.getSam(samMeth.getMethod(retType, argType, thrownType)));
+ }
+ };
+
+ SourceFile pkgClassSourceFile = new SourceFile("PackageClass.java",
+ "#P\n #M class PackageClass extends Exception { }") {
+ public String toString() {
+ return template.replaceAll("#P", samPkg.getPkgDecl()).
+ replaceAll("#M", modKind.modifier_str);
+ }
+ };
+
+ SourceFile clientSourceFile = new SourceFile("Client.java",
+ "#I\n abstract class Client { \n" +
+ " Sam s = #E;\n" +
+ " #M \n }") {
+ public String toString() {
+ return template.replaceAll("#I", samPkg.getImportStat())
+ .replaceAll("#E", exprKind.exprStr)
+ .replaceAll("#M", clientMeth.getMethod(retType, argType, thrownType));
+ }
+ };
+
+ FunctionalInterfaceConversionTest(PackageKind samPkg, ModifierKind modKind, SamKind samKind,
+ MethodKind samMeth, MethodKind clientMeth, TypeKind retType, TypeKind argType,
+ TypeKind thrownType, ExprKind exprKind) {
+ this.samPkg = samPkg;
+ this.modKind = modKind;
+ this.samKind = samKind;
+ this.samMeth = samMeth;
+ this.clientMeth = clientMeth;
+ this.retType = retType;
+ this.argType = argType;
+ this.thrownType = thrownType;
+ this.exprKind = exprKind;
+ this.dc = new DiagnosticChecker();
+ }
+
+ void test(JavaCompiler comp, StandardJavaFileManager fm) throws Exception {
+ JavacTask ct = (JavacTask)comp.getTask(null, fm, dc,
+ null, null, Arrays.asList(samSourceFile, pkgClassSourceFile, clientSourceFile));
+ ct.analyze();
+ if (dc.errorFound == checkSamConversion()) {
+ throw new AssertionError(samSourceFile + "\n\n" + pkgClassSourceFile + "\n\n" + clientSourceFile);
+ }
+ }
+
+ boolean checkSamConversion() {
+ if (samKind != SamKind.INTERFACE) {
+ //sam type must be an interface
+ return false;
+ } else if (samMeth == MethodKind.NONE) {
+ //interface must have at least one method
+ return false;
+ } else if (exprKind == ExprKind.LAMBDA &&
+ samMeth != MethodKind.NON_GENERIC) {
+ //target method for lambda must be non-generic
+ return false;
+ } else if (exprKind == ExprKind.MREF &&
+ clientMeth == MethodKind.NONE) {
+ return false;
+ } else if (samPkg != PackageKind.NO_PKG &&
+ modKind != ModifierKind.PUBLIC &&
+ (retType == TypeKind.PKG_CLASS ||
+ argType == TypeKind.PKG_CLASS ||
+ thrownType == TypeKind.PKG_CLASS)) {
+ //target must not contain inaccessible types
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ abstract class SourceFile extends SimpleJavaFileObject {
+
+ protected String template;
+
+ public SourceFile(String filename, String template) {
+ super(URI.create("myfo:/" + filename), JavaFileObject.Kind.SOURCE);
+ this.template = template;
+ }
+
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+ return toString();
+ }
+
+ public abstract String toString();
+ }
+
+ static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
+
+ boolean errorFound = false;
+
+ public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
+ if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
+ errorFound = true;
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/Intersection01.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8002099
+ * @summary Add support for intersection types in cast expression
+ * @compile/fail/ref=Intersection01.out -XDallowIntersectionTypes -XDrawDiagnostics Intersection01.java
+ */
+class Intersection01 {
+
+ interface SAM {
+ void m();
+ }
+
+ Object o1 = (java.io.Serializable & SAM)()->{};
+ Object o2 = (SAM & java.io.Serializable)()->{};
+ Object o3 = (java.io.Serializable & SAM)Intersection01::m;
+ Object o4 = (SAM & java.io.Serializable)Intersection01::m;
+
+ static void m() { }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/Intersection01.out Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,3 @@
+Intersection01.java:36:45: compiler.err.prob.found.req: (compiler.misc.not.a.functional.intf.1: (compiler.misc.no.abstracts: kindname.interface, java.io.Serializable))
+Intersection01.java:38:45: compiler.err.prob.found.req: (compiler.misc.not.a.functional.intf.1: (compiler.misc.no.abstracts: kindname.interface, java.io.Serializable))
+2 errors
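For context, Intersection01 pins down the ordering rule this prototype enforces for functional expressions in intersection casts: the functional interface has to be the first bound. A minimal sketch, using java.lang.Runnable as the SAM and assuming the same -XDallowIntersectionTypes flag (not part of the changeset):

    Object ok  = (Runnable & java.io.Serializable) () -> {};  // accepted: first bound is the SAM
    Object bad = (java.io.Serializable & Runnable) () -> {};  // rejected here: Serializable has no abstract method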
--- a/langtools/test/tools/javac/lambda/LambdaConv21.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/LambdaConv21.java Mon Dec 17 08:30:06 2012 -0500
@@ -23,7 +23,7 @@
static void testExpressionLambda() {
SAM_void s1 = ()->m_void(); //ok
SAM_java_lang_Void s2 = ()->m_void(); //no - incompatible target
- SAM_void s3 = ()->m_java_lang_Void(); //no - incompatible target
+ SAM_void s3 = ()->m_java_lang_Void(); //ok - expression statement lambda is compatible with void
SAM_java_lang_Void s4 = ()->m_java_lang_Void(); //ok
}
--- a/langtools/test/tools/javac/lambda/LambdaConv21.out Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/LambdaConv21.out Mon Dec 17 08:30:06 2012 -0500
@@ -1,6 +1,5 @@
LambdaConv21.java:25:43: compiler.err.prob.found.req: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.inconvertible.types: void, java.lang.Void))
-LambdaConv21.java:26:43: compiler.err.prob.found.req: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.inconvertible.types: java.lang.Void, void))
LambdaConv21.java:32:33: compiler.err.prob.found.req: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.missing.ret.val: java.lang.Void))
LambdaConv21.java:33:53: compiler.err.prob.found.req: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.unexpected.ret.val))
LambdaConv21.java:36:33: compiler.err.prob.found.req: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.missing.ret.val: java.lang.Void))
-5 errors
+4 errors
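The LambdaConv21 update reflects the relaxed void-compatibility rule: a lambda whose body is a statement expression may target a void-returning descriptor even when the invoked method produces a value; the value is simply discarded. A minimal sketch restating the newly accepted case with the test's own names:

    interface SAM_void { void m(); }
    static java.lang.Void m_java_lang_Void() { return null; }
    SAM_void s3 = () -> m_java_lang_Void();   // now ok: the Void result is discarded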
--- a/langtools/test/tools/javac/lambda/LambdaConversionTest.java Mon Dec 17 08:28:27 2012 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * @test
- * @bug 8003280
- * @summary Add lambda tests
- * perform several automated checks in lambda conversion, esp. around accessibility
- * @author Maurizio Cimadamore
- * @run main LambdaConversionTest
- */
-
-import com.sun.source.util.JavacTask;
-import java.net.URI;
-import java.util.Arrays;
-import javax.tools.Diagnostic;
-import javax.tools.JavaCompiler;
-import javax.tools.JavaFileObject;
-import javax.tools.SimpleJavaFileObject;
-import javax.tools.ToolProvider;
-
-public class LambdaConversionTest {
-
- enum PackageKind {
- NO_PKG(""),
- PKG_A("a");
-
- String pkg;
-
- PackageKind(String pkg) {
- this.pkg = pkg;
- }
-
- String getPkgDecl() {
- return this == NO_PKG ?
- "" :
- "package " + pkg + ";";
- }
-
- String getImportStat() {
- return this == NO_PKG ?
- "" :
- "import " + pkg + ".*;";
- }
- }
-
- enum SamKind {
- CLASS("public class Sam { }"),
- ABSTACT_CLASS("public abstract class Sam { }"),
- ANNOTATION("public @interface Sam { }"),
- ENUM("public enum Sam { }"),
- INTERFACE("public interface Sam { \n #METH; \n }");
-
- String sam_str;
-
- SamKind(String sam_str) {
- this.sam_str = sam_str;
- }
-
- String getSam(String methStr) {
- return sam_str.replaceAll("#METH", methStr);
- }
- }
-
- enum ModifierKind {
- PUBLIC("public"),
- PACKAGE("");
-
- String modifier_str;
-
- ModifierKind(String modifier_str) {
- this.modifier_str = modifier_str;
- }
-
- boolean stricterThan(ModifierKind that) {
- return this.ordinal() > that.ordinal();
- }
- }
-
- enum TypeKind {
- EXCEPTION("Exception"),
- PKG_CLASS("PackageClass");
-
- String typeStr;
-
- private TypeKind(String typeStr) {
- this.typeStr = typeStr;
- }
- }
-
- enum MethodKind {
- NONE(""),
- NON_GENERIC("public #R m(#ARG s) throws #T;"),
- GENERIC("public <X> #R m(#ARG s) throws #T;");
-
- String methodTemplate;
-
- private MethodKind(String methodTemplate) {
- this.methodTemplate = methodTemplate;
- }
-
- String getMethod(TypeKind retType, TypeKind argType, TypeKind thrownType) {
- return methodTemplate.replaceAll("#R", retType.typeStr).
- replaceAll("#ARG", argType.typeStr).
- replaceAll("#T", thrownType.typeStr);
- }
- }
-
- public static void main(String[] args) throws Exception {
- for (PackageKind samPkg : PackageKind.values()) {
- for (ModifierKind modKind : ModifierKind.values()) {
- for (SamKind samKind : SamKind.values()) {
- for (MethodKind meth : MethodKind.values()) {
- for (TypeKind retType : TypeKind.values()) {
- for (TypeKind argType : TypeKind.values()) {
- for (TypeKind thrownType : TypeKind.values()) {
- new LambdaConversionTest(samPkg, modKind, samKind,
- meth, retType, argType, thrownType).test();
- }
- }
- }
- }
- }
- }
- }
- }
-
- PackageKind samPkg;
- ModifierKind modKind;
- SamKind samKind;
- MethodKind meth;
- TypeKind retType;
- TypeKind argType;
- TypeKind thrownType;
-
- SourceFile samSourceFile = new SourceFile("Sam.java", "#P \n #C") {
- public String toString() {
- return template.replaceAll("#P", samPkg.getPkgDecl()).
- replaceAll("#C", samKind.getSam(meth.getMethod(retType, argType, thrownType)));
- }
- };
-
- SourceFile pkgClassSourceFile = new SourceFile("PackageClass.java",
- "#P\n #M class PackageClass extends Exception { }") {
- public String toString() {
- return template.replaceAll("#P", samPkg.getPkgDecl()).
- replaceAll("#M", modKind.modifier_str);
- }
- };
-
- SourceFile clientSourceFile = new SourceFile("Client.java",
- "#I\n class Client { Sam s = x -> null; }") {
- public String toString() {
- return template.replaceAll("#I", samPkg.getImportStat());
- }
- };
-
- LambdaConversionTest(PackageKind samPkg, ModifierKind modKind, SamKind samKind,
- MethodKind meth, TypeKind retType, TypeKind argType, TypeKind thrownType) {
- this.samPkg = samPkg;
- this.modKind = modKind;
- this.samKind = samKind;
- this.meth = meth;
- this.retType = retType;
- this.argType = argType;
- this.thrownType = thrownType;
- }
-
- void test() throws Exception {
- final JavaCompiler tool = ToolProvider.getSystemJavaCompiler();
- DiagnosticChecker dc = new DiagnosticChecker();
- JavacTask ct = (JavacTask)tool.getTask(null, null, dc,
- null, null, Arrays.asList(samSourceFile, pkgClassSourceFile, clientSourceFile));
- ct.analyze();
- if (dc.errorFound == checkSamConversion()) {
- throw new AssertionError(samSourceFile + "\n\n" + pkgClassSourceFile + "\n\n" + clientSourceFile);
- }
- }
-
- boolean checkSamConversion() {
- if (samKind != SamKind.INTERFACE) {
- //sam type must be an interface
- return false;
- } else if (meth != MethodKind.NON_GENERIC) {
- //target method must be non-generic
- return false;
- } else if (samPkg != PackageKind.NO_PKG &&
- modKind != ModifierKind.PUBLIC &&
- (retType == TypeKind.PKG_CLASS ||
- argType == TypeKind.PKG_CLASS ||
- thrownType == TypeKind.PKG_CLASS)) {
- //target must not contain inaccessible types
- return false;
- } else {
- return true;
- }
- }
-
- abstract class SourceFile extends SimpleJavaFileObject {
-
- protected String template;
-
- public SourceFile(String filename, String template) {
- super(URI.create("myfo:/" + filename), JavaFileObject.Kind.SOURCE);
- this.template = template;
- }
-
- @Override
- public CharSequence getCharContent(boolean ignoreEncodingErrors) {
- return toString();
- }
-
- public abstract String toString();
- }
-
- static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
-
- boolean errorFound = false;
-
- public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
- if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
- errorFound = true;
- }
- }
- }
-}
--- a/langtools/test/tools/javac/lambda/LambdaParserTest.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/LambdaParserTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -90,9 +90,14 @@
enum LambdaParameterKind {
IMPLICIT(""),
EXPLIICT_SIMPLE("A"),
+ EXPLIICT_SIMPLE_ARR1("A[]"),
+ EXPLIICT_SIMPLE_ARR2("A[][]"),
EXPLICIT_VARARGS("A..."),
EXPLICIT_GENERIC1("A<X>"),
- EXPLICIT_GENERIC3("A<? extends X, ? super Y>");
+ EXPLICIT_GENERIC2("A<? extends X, ? super Y>"),
+ EXPLICIT_GENERIC2_VARARGS("A<? extends X, ? super Y>..."),
+ EXPLICIT_GENERIC2_ARR1("A<? extends X, ? super Y>[]"),
+ EXPLICIT_GENERIC2_ARR2("A<? extends X, ? super Y>[][]");
String parameterType;
@@ -103,6 +108,11 @@
boolean explicit() {
return this != IMPLICIT;
}
+
+ boolean isVarargs() {
+ return this == EXPLICIT_VARARGS ||
+ this == EXPLICIT_GENERIC2_VARARGS;
+ }
}
enum ModifierKind {
@@ -253,7 +263,7 @@
if (lk.arity() == 2 &&
(pk1.explicit() != pk2.explicit() ||
- pk1 == LambdaParameterKind.EXPLICIT_VARARGS)) {
+ pk1.isVarargs())) {
errorExpected = true;
}
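The LambdaParserTest additions extend explicit lambda parameters to array and generic-array types, and treat any varargs form, plain or generic, as illegal outside the last parameter position. A minimal sketch of the shapes involved (interface name hypothetical):

    interface ArrFun { int m(String[] a); }
    ArrFun f = (String[] a) -> a.length;    // explicit array-typed parameter now covered
    // (String... a, int i) -> 0            // varargs not in last position: rejected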
--- a/langtools/test/tools/javac/lambda/MethodReference30.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/MethodReference30.java Mon Dec 17 08:30:06 2012 -0500
@@ -46,7 +46,7 @@
assertTrue(true);
}
- static void m() { }
+ void m() { }
public static void main(String[] args) {
SAM s = new MethodReference30()::m;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference55.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8004101
+ * @summary Add checks for method reference well-formedness
+ * @compile/fail/ref=MethodReference55.out -XDrawDiagnostics MethodReference55.java
+ */
+class MethodReference55<X> {
+
+ interface V {
+ void m(Object o);
+ }
+
+ V v = new MethodReference55<String>()::m;
+
+ void test() {
+ g(new MethodReference55<String>()::m);
+ }
+
+ void g(V v) { }
+
+ static void m(Object o) { };
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference55.out Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,3 @@
+MethodReference55.java:36:11: compiler.err.invalid.mref: kindname.method, (compiler.misc.static.bound.mref)
+MethodReference55.java:39:11: compiler.err.invalid.mref: kindname.method, (compiler.misc.static.bound.mref)
+2 errors
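MethodReference55 exercises the new well-formedness check reported as compiler.misc.static.bound.mref: a static method must not be referenced through a bound instance receiver. A minimal sketch of the distinction, reusing the test's declarations:

    V ok  = MethodReference55::m;                 // static method referenced via the type name
    V bad = new MethodReference55<String>()::m;   // rejected: receiver expression bound to a static method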
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference56.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8004101
+ * @summary Add checks for method reference well-formedness
+ * @compile/fail/ref=MethodReference56.out -XDrawDiagnostics MethodReference56.java
+ */
+class MethodReference56<X> {
+
+ interface V {
+ void m(Object o);
+ }
+
+ V v = MethodReference56<String>::m;
+
+ void test() {
+ g(MethodReference56<String>::m);
+ }
+
+ void g(V v) { }
+
+ static void m(Object o) { };
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference56.out Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,3 @@
+MethodReference56.java:36:28: compiler.err.invalid.mref: kindname.method, (compiler.misc.static.mref.with.targs)
+MethodReference56.java:39:28: compiler.err.invalid.mref: kindname.method, (compiler.misc.static.mref.with.targs)
+2 errors
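MethodReference56 covers the companion check, compiler.misc.static.mref.with.targs: a reference to a static method must not put type arguments on the receiver type. Sketch, again with the test's declarations:

    V ok  = MethodReference56::m;           // raw type name is the accepted form
    V bad = MethodReference56<String>::m;   // rejected: type arguments on the receiver of a static reference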
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference57.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8004102
+ * @summary Add support for generic functional descriptors
+ * @compile MethodReference57.java
+ */
+class MethodReference57 {
+
+ interface F {
+ <X> void m();
+ }
+
+ void test() {
+ F f = this::g; //ok
+ }
+
+ void g() { }
+}
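MethodReference57 checks the positive side of generic functional descriptors: an interface whose single method declares its own type parameter can still be implemented by a method reference, since lambdas have no syntax for declaring type parameters. Sketch:

    interface F { <X> void m(); }
    F f = this::g;        // ok: g() need not be generic to satisfy <X> void m()
    // F f2 = () -> {};   // a lambda cannot declare type parameters, so it cannot target F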
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference58.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8004102
+ * @summary Add support for generic functional descriptors
+ * @compile/fail/ref=MethodReference58.out -XDrawDiagnostics MethodReference58.java
+ */
+class MethodReference58 {
+
+ interface F_Object {
+ <X> void m(X x);
+ }
+
+ interface F_Integer {
+ <X extends Integer> void m(X x);
+ }
+
+ void test() {
+ F_Object f1 = this::g; //incompatible bounds
+ F_Integer f2 = this::g; //ok
+ }
+
+ <Z extends Number> void g(Z z) { }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/MethodReference58.out Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,2 @@
+MethodReference58.java:41:23: compiler.err.prob.found.req: (compiler.misc.invalid.mref: kindname.method, (compiler.misc.cant.apply.symbol: kindname.method, g, Z, X, kindname.class, MethodReference58, (compiler.misc.inferred.do.not.conform.to.upper.bounds: X, java.lang.Number)))
+1 error
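MethodReference58 adds the bounds check for generic descriptors: the descriptor's type variable must conform to the bounds of the referenced method's type variable. A short restatement of why only the second assignment compiles:

    F_Integer ok = this::g;      // X (bounded by Integer) conforms to Z's bound Number
    // F_Object  no = this::g;   // X (bounded only by Object) does not conform to Number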
--- a/langtools/test/tools/javac/lambda/VoidCompatibility.out Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/VoidCompatibility.out Mon Dec 17 08:30:06 2012 -0500
@@ -1,2 +1,3 @@
+VoidCompatibility.java:17:9: compiler.err.ref.ambiguous: schedule, kindname.method, schedule(VoidCompatibility.Runnable), VoidCompatibility, kindname.method, schedule(VoidCompatibility.Thunk<?>), VoidCompatibility
VoidCompatibility.java:23:9: compiler.err.ref.ambiguous: schedule, kindname.method, schedule(VoidCompatibility.Runnable), VoidCompatibility, kindname.method, schedule(VoidCompatibility.Thunk<?>), VoidCompatibility
-1 error
+2 errors
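The VoidCompatibility golden file now expects an ambiguity at line 17 as well as line 23: at this point in the prototype, a lambda whose body fits both a void-returning and a value-returning descriptor leaves both schedule overloads applicable. A rough sketch of the shape involved (helper name and Thunk's descriptor are assumptions, not taken from the test source):

    void schedule(Runnable r) { }     // void-returning descriptor
    void schedule(Thunk<?> t) { }     // Thunk<T> assumed to declare a value-returning T get()
    // schedule(() -> produce());     // a value-producing statement expression fits both shapes,
    //                                // hence compiler.err.ref.ambiguous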
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javac/lambda/intersection/IntersectionTargetTypeTest.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8002099
+ * @summary Add support for intersection types in cast expression
+ */
+
+import com.sun.source.util.JavacTask;
+import com.sun.tools.javac.util.List;
+import com.sun.tools.javac.util.ListBuffer;
+import java.net.URI;
+import java.util.Arrays;
+import javax.tools.Diagnostic;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+public class IntersectionTargetTypeTest {
+
+ static int checkCount = 0;
+
+ enum BoundKind {
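+        // CLASS: class bound; INTF: interface with more than one abstract method;
+        // SAM: exactly one abstract method; ZAM: no abstract methods (at most defaults)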
+ INTF,
+ CLASS,
+ SAM,
+ ZAM;
+ }
+
+ enum MethodKind {
+ NONE,
+ ABSTRACT,
+ DEFAULT;
+ }
+
+ enum TypeKind {
+ A("interface A { }\n", "A", BoundKind.ZAM),
+ B("interface B { default void m() { } }\n", "B", BoundKind.ZAM),
+ C("interface C { void m(); }\n", "C", BoundKind.SAM),
+ D("interface D extends B { }\n", "D", BoundKind.ZAM),
+ E("interface E extends C { }\n", "E", BoundKind.SAM),
+ F("interface F extends C { void g(); }\n", "F", BoundKind.INTF),
+ G("interface G extends B { void g(); }\n", "G", BoundKind.SAM),
+ H("interface H extends A { void g(); }\n", "H", BoundKind.SAM),
+ OBJECT("", "Object", BoundKind.CLASS),
+ STRING("", "String", BoundKind.CLASS);
+
+ String declStr;
+ String typeStr;
+ BoundKind boundKind;
+
+ private TypeKind(String declStr, String typeStr, BoundKind boundKind) {
+ this.declStr = declStr;
+ this.typeStr = typeStr;
+ this.boundKind = boundKind;
+ }
+
+ boolean compatibleSupertype(TypeKind tk) {
+ if (tk == this) return true;
+ switch (tk) {
+ case B:
+ return this != C && this != E && this != F;
+ case C:
+ return this != B && this != C && this != D && this != G;
+ case D: return compatibleSupertype(B);
+ case E:
+ case F: return compatibleSupertype(C);
+ case G: return compatibleSupertype(B);
+ case H: return compatibleSupertype(A);
+ default:
+ return true;
+ }
+ }
+ }
+
+ enum CastKind {
+ ONE_ARY("(#B0)", 1),
+ TWO_ARY("(#B0 & #B1)", 2),
+ THREE_ARY("(#B0 & #B1 & #B2)", 3);
+
+ String castTemplate;
+ int nbounds;
+
+ CastKind(String castTemplate, int nbounds) {
+ this.castTemplate = castTemplate;
+ this.nbounds = nbounds;
+ }
+ }
+
+ enum ExpressionKind {
+ LAMBDA("()->{}", true),
+ MREF("this::m", true),
+ //COND_LAMBDA("(true ? ()->{} : ()->{})", true), re-enable if spec allows this
+ //COND_MREF("(true ? this::m : this::m)", true),
+ STANDALONE("null", false);
+
+ String exprString;
+ boolean isFunctional;
+
+ private ExpressionKind(String exprString, boolean isFunctional) {
+ this.exprString = exprString;
+ this.isFunctional = isFunctional;
+ }
+ }
+
+ static class CastInfo {
+ CastKind kind;
+ TypeKind[] types;
+
+ CastInfo(CastKind kind, TypeKind... types) {
+ this.kind = kind;
+ this.types = types;
+ }
+
+ String getCast() {
+ String temp = kind.castTemplate;
+ for (int i = 0; i < kind.nbounds ; i++) {
+ temp = temp.replace(String.format("#B%d", i), types[i].typeStr);
+ }
+ return temp;
+ }
+
+ boolean wellFormed() {
+ //check for duplicate types
+ for (int i = 0 ; i < types.length ; i++) {
+ for (int j = 0 ; j < types.length ; j++) {
+ if (i != j && types[i] == types[j]) {
+ return false;
+ }
+ }
+ }
+ //check that classes only appear as first bound
+ boolean classOk = true;
+ for (int i = 0 ; i < types.length ; i++) {
+ if (types[i].boundKind == BoundKind.CLASS &&
+ !classOk) {
+ return false;
+ }
+ classOk = false;
+ }
+ //check that supertypes are mutually compatible
+ for (int i = 0 ; i < types.length ; i++) {
+ for (int j = 0 ; j < types.length ; j++) {
+ if (!types[i].compatibleSupertype(types[j]) && i != j) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ }
+
+ public static void main(String... args) throws Exception {
+ //create default shared JavaCompiler - reused across multiple compilations
+ JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+ StandardJavaFileManager fm = comp.getStandardFileManager(null, null, null);
+
+ for (CastInfo cInfo : allCastInfo()) {
+ for (ExpressionKind ek : ExpressionKind.values()) {
+ new IntersectionTargetTypeTest(cInfo, ek).run(comp, fm);
+ }
+ }
+ System.out.println("Total check executed: " + checkCount);
+ }
+
+ static List<CastInfo> allCastInfo() {
+ ListBuffer<CastInfo> buf = ListBuffer.lb();
+ for (CastKind kind : CastKind.values()) {
+ for (TypeKind b1 : TypeKind.values()) {
+ if (kind.nbounds == 1) {
+ buf.append(new CastInfo(kind, b1));
+ continue;
+ } else {
+ for (TypeKind b2 : TypeKind.values()) {
+ if (kind.nbounds == 2) {
+ buf.append(new CastInfo(kind, b1, b2));
+ continue;
+ } else {
+ for (TypeKind b3 : TypeKind.values()) {
+ buf.append(new CastInfo(kind, b1, b2, b3));
+ }
+ }
+ }
+ }
+ }
+ }
+ return buf.toList();
+ }
+
+ CastInfo cInfo;
+ ExpressionKind ek;
+ JavaSource source;
+ DiagnosticChecker diagChecker;
+
+ IntersectionTargetTypeTest(CastInfo cInfo, ExpressionKind ek) {
+ this.cInfo = cInfo;
+ this.ek = ek;
+ this.source = new JavaSource();
+ this.diagChecker = new DiagnosticChecker();
+ }
+
+ class JavaSource extends SimpleJavaFileObject {
+
+ String bodyTemplate = "class Test {\n" +
+ " void m() { }\n" +
+ " void test() {\n" +
+ " Object o = #C#E;\n" +
+ " } }";
+
+ String source = "";
+
+ public JavaSource() {
+ super(URI.create("myfo:/Test.java"), JavaFileObject.Kind.SOURCE);
+ for (TypeKind tk : TypeKind.values()) {
+ source += tk.declStr;
+ }
+ source += bodyTemplate.replaceAll("#C", cInfo.getCast()).replaceAll("#E", ek.exprString);
+ }
+
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+ return source;
+ }
+ }
+
+ void run(JavaCompiler tool, StandardJavaFileManager fm) throws Exception {
+ JavacTask ct = (JavacTask)tool.getTask(null, fm, diagChecker,
+ Arrays.asList("-XDallowIntersectionTypes"), null, Arrays.asList(source));
+ try {
+ ct.analyze();
+ } catch (Throwable ex) {
+ throw new AssertionError("Error thrown when compiling the following code:\n" + source.getCharContent(true));
+ }
+ check();
+ }
+
+ void check() {
+ checkCount++;
+
+ boolean errorExpected = !cInfo.wellFormed();
+
+ if (ek.isFunctional) {
+ //first bound must be a SAM
+ errorExpected |= cInfo.types[0].boundKind != BoundKind.SAM;
+ if (cInfo.types.length > 1) {
+ //additional bounds must be ZAMs
+ for (int i = 1; i < cInfo.types.length; i++) {
+ errorExpected |= cInfo.types[i].boundKind != BoundKind.ZAM;
+ }
+ }
+ }
+
+ if (errorExpected != diagChecker.errorFound) {
+ throw new Error("invalid diagnostics for source:\n" +
+ source.getCharContent(true) +
+ "\nFound error: " + diagChecker.errorFound +
+ "\nExpected error: " + errorExpected);
+ }
+ }
+
+ static class DiagnosticChecker implements javax.tools.DiagnosticListener<JavaFileObject> {
+
+ boolean errorFound;
+
+ public void report(Diagnostic<? extends JavaFileObject> diagnostic) {
+ if (diagnostic.getKind() == Diagnostic.Kind.ERROR) {
+ errorFound = true;
+ }
+ }
+ }
+}
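The check() logic above encodes the rule this prototype applies to functional expressions in intersection casts: the first bound must be the SAM and every additional bound must contribute no abstract methods. Using the test's own interfaces, roughly:

    Object o1 = (C & A) () -> {};   // ok: C is the SAM, A contributes no abstract methods
    Object o2 = (C & F) () -> {};   // error expected: F adds a second abstract method
    Object o3 = (A & C) () -> {};   // error expected: the first bound is not the SAM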
--- a/langtools/test/tools/javac/lambda/methodReference/MethodRef1.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/methodReference/MethodRef1.java Mon Dec 17 08:30:06 2012 -0500
@@ -70,9 +70,6 @@
b = MethodRef1::foo; //static reference to foo(int)
b.m(1);
- b = new MethodRef1()::foo; //instance reference to static methods, supported for now
- b.m(1);
-
b = MethodRef1::bar; //static reference to bar(int)
b.m(2);
--- a/langtools/test/tools/javac/lambda/methodReference/SamConversion.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/methodReference/SamConversion.java Mon Dec 17 08:30:06 2012 -0500
@@ -133,15 +133,6 @@
} catch (Exception e) {
assertTrue(false);
}
-
- bar = new A()::method6;
- try {
- bar.m(1);
- assertTrue(false);
- } catch (MyException e) {
- } catch (Exception e) {
- assertTrue(false);
- }
}
/**
--- a/langtools/test/tools/javac/lambda/methodReferenceExecution/MethodReferenceTestKinds.java Mon Dec 17 08:28:27 2012 -0500
+++ b/langtools/test/tools/javac/lambda/methodReferenceExecution/MethodReferenceTestKinds.java Mon Dec 17 08:30:06 2012 -0500
@@ -119,20 +119,6 @@
assertEquals(var.get(inst("arg")), "SM:1-MethodReferenceTestKinds(arg)");
}
- public void testMRStaticEval() {
- MethodReferenceTestKinds evalCheck;
- S0 var = (evalCheck = inst("discard"))::staticMethod0;
- assertEquals(evalCheck.toString(), "MethodReferenceTestKinds(discard)");
- assertEquals(var.get(), "SM:0");
- }
-
- public void testMRStaticEvalArg() {
- MethodReferenceTestKinds evalCheck;
- S1 var = (evalCheck = inst("discard"))::staticMethod1;
- assertEquals(evalCheck.toString(), "MethodReferenceTestKinds(discard)");
- assertEquals(var.get(inst("arg")), "SM:1-MethodReferenceTestKinds(arg)");
- }
-
public void testMRTopLevel() {
SN0 var = MethodReferenceTestKindsBase::new;
assertEquals(var.make().toString(), "MethodReferenceTestKindsBase(blank)");
@@ -142,17 +128,7 @@
SN1 var = MethodReferenceTestKindsBase::new;
assertEquals(var.make("name").toString(), "MethodReferenceTestKindsBase(name)");
}
-/* unbound inner case not supported anymore (dropped by EG)
- public void testMRUnboundInner() {
- SXN0 var = MethodReferenceTestKinds.In::new;
- assertEquals(var.make(inst("out")).toString(), "In(blank)");
- }
- public void testMRUnboundInnerArg() {
- SXN1 var = MethodReferenceTestKinds.In::new;
- assertEquals(var.make(inst("out"), "name").toString(), "In(name)");
- }
-*/
public void testMRImplicitInner() {
SN0 var = MethodReferenceTestKinds.In::new;
assertEquals(var.make().toString(), "In(blank)");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/langtools/test/tools/javap/T7190862.java Mon Dec 17 08:30:06 2012 -0500
@@ -0,0 +1,157 @@
+
+/*
+ * @test /nodynamiccopyright/
+ * @bug 7190862 7109747
+ * @summary javap shows an incorrect type for operands if the 'wide' prefix is used
+ */
+
+import com.sun.source.util.JavacTask;
+import com.sun.tools.javap.JavapFileManager;
+import com.sun.tools.javap.JavapTask;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import javax.tools.Diagnostic;
+import javax.tools.DiagnosticCollector;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileManager;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.ToolProvider;
+
+public class T7190862 {
+
+ enum TypeWideInstructionMap {
+ INT("int", new String[]{"istore_w", "iload_w"}),
+ LONG("long", new String[]{"lstore_w", "lload_w"}),
+ FLOAT("float", new String[]{"fstore_w", "fload_w"}),
+ DOUBLE("double", new String[]{"dstore_w", "dload_w"}),
+ OBJECT("Object", new String[]{"astore_w", "aload_w"});
+
+ String type;
+ String[] instructions;
+
+ TypeWideInstructionMap(String type, String[] instructions) {
+ this.type = type;
+ this.instructions = instructions;
+ }
+ }
+
+ JavaSource source;
+
+ public static void main(String[] args) {
+ JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+ new T7190862().run(comp);
+ }
+
+ private void run(JavaCompiler comp) {
+ String code;
+ for (TypeWideInstructionMap typeInstructionMap: TypeWideInstructionMap.values()) {
+ if (typeInstructionMap != TypeWideInstructionMap.OBJECT) {
+ code = createWideLocalSource(typeInstructionMap.type, 300);
+ } else {
+ code = createWideLocalSourceForObject(300);
+ }
+ source = new JavaSource(code);
+ compile(comp);
+ check(typeInstructionMap.instructions);
+ }
+
+ //an extra test for the iinc instruction
+ code = createIincSource();
+ source = new JavaSource(code);
+ compile(comp);
+ check(new String[]{"iinc_w"});
+ }
+
+ private void compile(JavaCompiler comp) {
+ JavacTask ct = (JavacTask)comp.getTask(null, null, null, null, null, Arrays.asList(source));
+ try {
+ if (!ct.call()) {
+ throw new AssertionError("Error thrown when compiling the following source:\n" + source.getCharContent(true));
+ }
+ } catch (Throwable ex) {
+ throw new AssertionError("Error thrown when compiling the following source:\n" + source.getCharContent(true));
+ }
+ }
+
+ private void check(String[] instructions) {
+ String out = javap(Arrays.asList("-c"), Arrays.asList("Test.class"));
+ for (String line: out.split(System.getProperty("line.separator"))) {
+ line = line.trim();
+ for (String instruction: instructions) {
+ if (line.contains(instruction) && line.contains("#")) {
+ throw new Error("incorrect type for operands for instruction " + instruction);
+ }
+ }
+ }
+ }
+
+ private String javap(List<String> args, List<String> classes) {
+ DiagnosticCollector<JavaFileObject> dc = new DiagnosticCollector<JavaFileObject>();
+ StringWriter sw = new StringWriter();
+ PrintWriter pw = new PrintWriter(sw);
+ JavaFileManager fm = JavapFileManager.create(dc, pw);
+ JavapTask t = new JavapTask(pw, fm, dc, args, classes);
+ boolean ok = t.run();
+ if (!ok)
+ throw new Error("javap failed unexpectedly");
+
+ List<Diagnostic<? extends JavaFileObject>> diags = dc.getDiagnostics();
+ for (Diagnostic<? extends JavaFileObject> d: diags) {
+ if (d.getKind() == Diagnostic.Kind.ERROR)
+ throw new Error(d.getMessage(Locale.ENGLISH));
+ }
+ return sw.toString();
+
+ }
+
+ private String createWideLocalSource(String type, int numberOfVars) {
+ String result = " " + type + " x0 = 0;\n";
+ for (int i = 1; i < numberOfVars; i++) {
+ result += " " + type + " x" + i + " = x" + (i - 1) + " + 1;\n";
+ }
+ return result;
+ }
+
+ private String createWideLocalSourceForObject(int numberOfVars) {
+ String result = " Object x0 = new Object();\n";
+ for (int i = 1; i < numberOfVars; i++) {
+ result += " Object x" + i + " = x0;\n";
+ }
+ return result;
+ }
+
+ private String createIincSource() {
+ return " int i = 0;\n"
+ + " i += 1;\n"
+ + " i += 51;\n"
+ + " i += 101;\n"
+ + " i += 151;\n";
+ }
+
+ class JavaSource extends SimpleJavaFileObject {
+
+ String template = "class Test {\n" +
+ " public static void main(String[] args)\n" +
+ " {\n" +
+ " #C" +
+ " }\n" +
+ "}";
+
+ String source;
+
+ public JavaSource(String code) {
+ super(URI.create("Test.java"), JavaFileObject.Kind.SOURCE);
+ source = template.replaceAll("#C", code);
+ }
+
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+ return source;
+ }
+ }
+}
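T7190862 generates methods with roughly 300 locals so that javac must emit the wide instruction forms (istore_w, iload_w and friends, plus iinc_w), and then asserts that javap renders their operand as a plain local-variable index rather than a constant-pool style reference. The expected output shape is roughly (spacing approximate):

    iload_w         261     // operand printed as a local slot index (correct)
    iload_w         #261    // '#' operand form the test rejects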
--- a/make/jprt.properties Mon Dec 17 08:28:27 2012 -0500
+++ b/make/jprt.properties Mon Dec 17 08:30:06 2012 -0500
@@ -88,7 +88,6 @@
${jprt.my.test.target.set:TESTNAME=jdk_jmx}, \
${jprt.my.test.target.set:TESTNAME=jdk_text}, \
${jprt.my.test.target.set:TESTNAME=jdk_tools}, \
- ${jprt.my.test.target.set:TESTNAME=jdk_jdi}, \
${jprt.my.test.target.set:TESTNAME=jdk_jfr}, \
${jprt.my.test.target.set:TESTNAME=jdk_other}
@@ -105,6 +104,7 @@
${jprt.my.test.target.set:TESTNAME=jdk_beans1}, \
${jprt.my.test.target.set:TESTNAME=jdk_beans2}, \
${jprt.my.test.target.set:TESTNAME=jdk_beans3}, \
+ ${jprt.my.test.target.set:TESTNAME=jdk_jdi}, \
${jprt.my.test.target.set:TESTNAME=jdk_sound}, \
${jprt.my.test.target.set:TESTNAME=jdk_swing}