# HG changeset patch # User duke # Date 1499279832 -7200 # Node ID 5b500c93ce4822d47061cd518ff3f72d9d8cb5b5 # Parent b2ea0d3316ef41ced845c14f27afe6628059da28# Parent 1c0a1cee60545ac37ea9c96a58a47bed97242ed4 Merge diff -r 1c0a1cee6054 -r 5b500c93ce48 .hgtags-top-repo --- a/.hgtags-top-repo Wed Jul 05 20:36:16 2017 +0200 +++ b/.hgtags-top-repo Wed Jul 05 20:37:12 2017 +0200 @@ -309,3 +309,4 @@ 82cf9aab9a83e41c8194ba01af9666afdb856cbe jdk9-b64 7c31f9d7b932f7924f1258d52885b1c7c3e078c2 jdk9-b65 dc6e8336f51bb6b67b7245766179eab5ca7720b4 jdk9-b66 +f546760134eb861fcfecd4ce611b0040b0d25a6a jdk9-b67 diff -r 1c0a1cee6054 -r 5b500c93ce48 common/autoconf/flags.m4 --- a/common/autoconf/flags.m4 Wed Jul 05 20:36:16 2017 +0200 +++ b/common/autoconf/flags.m4 Wed Jul 05 20:37:12 2017 +0200 @@ -338,14 +338,16 @@ # no adjustment ;; slowdebug ) - # Add runtime stack smashing and undefined behavior checks - CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" - CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" + # Add runtime stack smashing and undefined behavior checks. + # Not all versions of gcc support -fstack-protector + STACK_PROTECTOR_CFLAG="-fstack-protector-all" + FLAGS_COMPILER_CHECK_ARGUMENTS([$STACK_PROTECTOR_CFLAG], [], [STACK_PROTECTOR_CFLAG=""]) + + CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1" + CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1" ;; esac fi - AC_SUBST(CFLAGS_DEBUG_OPTIONS) - AC_SUBST(CXXFLAGS_DEBUG_OPTIONS) # Optimization levels if test "x$TOOLCHAIN_TYPE" = xsolstudio; then diff -r 1c0a1cee6054 -r 5b500c93ce48 common/autoconf/generated-configure.sh --- a/common/autoconf/generated-configure.sh Wed Jul 05 20:36:16 2017 +0200 +++ b/common/autoconf/generated-configure.sh Wed Jul 05 20:37:12 2017 +0200 @@ -718,8 +718,6 @@ C_O_FLAG_NORM C_O_FLAG_HI C_O_FLAG_HIGHEST -CXXFLAGS_DEBUG_OPTIONS -CFLAGS_DEBUG_OPTIONS CXXFLAGS_DEBUG_SYMBOLS CFLAGS_DEBUG_SYMBOLS CXX_FLAG_DEPS @@ -4366,7 +4364,7 @@ #CUSTOM_AUTOCONF_INCLUDE # Do not change or remove the following line, it is needed for consistency checks: -DATE_WHEN_GENERATED=1432629750 +DATE_WHEN_GENERATED=1433337614 ############################################################################### # @@ -41837,15 +41835,81 @@ # no adjustment ;; slowdebug ) - # Add runtime stack smashing and undefined behavior checks - CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" - CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" + # Add runtime stack smashing and undefined behavior checks. + # Not all versions of gcc support -fstack-protector + STACK_PROTECTOR_CFLAG="-fstack-protector-all" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"" >&5 +$as_echo_n "checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"... " >&6; } + supports=yes + + saved_cflags="$CFLAGS" + CFLAGS="$CFLAGS $STACK_PROTECTOR_CFLAG" + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int i; +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CFLAGS="$saved_cflags" + + saved_cxxflags="$CXXFLAGS" + CXXFLAGS="$CXXFLAG $STACK_PROTECTOR_CFLAG" + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int i; +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CXXFLAGS="$saved_cxxflags" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5 +$as_echo "$supports" >&6; } + if test "x$supports" = "xyes" ; then + : + else + STACK_PROTECTOR_CFLAG="" + fi + + + CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1" + CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1" ;; esac fi - - # Optimization levels if test "x$TOOLCHAIN_TYPE" = xsolstudio; then CC_HIGHEST="$CC_HIGHEST -fns -fsimple -fsingle -xbuiltin=%all -xdepend -xrestrict -xlibmil" diff -r 1c0a1cee6054 -r 5b500c93ce48 corba/.hgtags --- a/corba/.hgtags Wed Jul 05 20:36:16 2017 +0200 +++ b/corba/.hgtags Wed Jul 05 20:37:12 2017 +0200 @@ -309,3 +309,4 @@ 0a5e5a7c3539e8bde73d9fe55750e49a49cb8dac jdk9-b64 afc1e295c4bf83f9a5dd539c29914edd4a754a3f jdk9-b65 44ee68f7dbacab24a45115fd6a8ccdc7eb6e8f0b jdk9-b66 +4418697e56f1f43597f55c7cb6573549c6117868 jdk9-b67 diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/.hgtags --- a/hotspot/.hgtags Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/.hgtags Wed Jul 05 20:37:12 2017 +0200 @@ -469,3 +469,4 @@ bf92b8db249cdfa5651ef954b6c0743a7e0ea4cd jdk9-b64 e7ae94c4f35e940ea423fc1dd260435df34a77c0 jdk9-b65 197e94e0dacddd16816f101d24fc0442ab518326 jdk9-b66 +d47dfabd16d48eb96a451edd1b61194a39ee0eb5 jdk9-b67 diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/aarch64.ad --- a/hotspot/src/cpu/aarch64/vm/aarch64.ad Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad Wed Jul 05 20:37:12 2017 +0200 @@ -161,70 +161,165 @@ // the platform ABI treats v8-v15 as callee save). 
float registers // v16-v31 are SOC as per the platform spec - reg_def V0 ( SOC, SOC, Op_RegF, 0, v0->as_VMReg() ); - reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next() ); - reg_def V1 ( SOC, SOC, Op_RegF, 1, v1->as_VMReg() ); - reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next() ); - reg_def V2 ( SOC, SOC, Op_RegF, 2, v2->as_VMReg() ); - reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next() ); - reg_def V3 ( SOC, SOC, Op_RegF, 3, v3->as_VMReg() ); - reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next() ); - reg_def V4 ( SOC, SOC, Op_RegF, 4, v4->as_VMReg() ); - reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next() ); - reg_def V5 ( SOC, SOC, Op_RegF, 5, v5->as_VMReg() ); - reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next() ); - reg_def V6 ( SOC, SOC, Op_RegF, 6, v6->as_VMReg() ); - reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next() ); - reg_def V7 ( SOC, SOC, Op_RegF, 7, v7->as_VMReg() ); - reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next() ); - reg_def V8 ( SOC, SOE, Op_RegF, 8, v8->as_VMReg() ); - reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next() ); - reg_def V9 ( SOC, SOE, Op_RegF, 9, v9->as_VMReg() ); - reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next() ); - reg_def V10 ( SOC, SOE, Op_RegF, 10, v10->as_VMReg() ); - reg_def V10_H( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()); - reg_def V11 ( SOC, SOE, Op_RegF, 11, v11->as_VMReg() ); - reg_def V11_H( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()); - reg_def V12 ( SOC, SOE, Op_RegF, 12, v12->as_VMReg() ); - reg_def V12_H( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()); - reg_def V13 ( SOC, SOE, Op_RegF, 13, v13->as_VMReg() ); - reg_def V13_H( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()); - reg_def V14 ( SOC, SOE, Op_RegF, 14, v14->as_VMReg() ); - reg_def V14_H( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()); - reg_def V15 ( SOC, SOE, Op_RegF, 15, v15->as_VMReg() ); - reg_def V15_H( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()); - reg_def V16 ( SOC, SOC, Op_RegF, 16, v16->as_VMReg() ); - reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()); - reg_def V17 ( SOC, SOC, Op_RegF, 17, v17->as_VMReg() ); - reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()); - reg_def V18 ( SOC, SOC, Op_RegF, 18, v18->as_VMReg() ); - reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()); - reg_def V19 ( SOC, SOC, Op_RegF, 19, v19->as_VMReg() ); - reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()); - reg_def V20 ( SOC, SOC, Op_RegF, 20, v20->as_VMReg() ); - reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()); - reg_def V21 ( SOC, SOC, Op_RegF, 21, v21->as_VMReg() ); - reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()); - reg_def V22 ( SOC, SOC, Op_RegF, 22, v22->as_VMReg() ); - reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()); - reg_def V23 ( SOC, SOC, Op_RegF, 23, v23->as_VMReg() ); - reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()); - reg_def V24 ( SOC, SOC, Op_RegF, 24, v24->as_VMReg() ); - reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()); - reg_def V25 ( SOC, SOC, Op_RegF, 25, v25->as_VMReg() ); - reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()); - reg_def V26 ( SOC, SOC, Op_RegF, 26, v26->as_VMReg() ); - reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()); - reg_def V27 ( SOC, SOC, Op_RegF, 27, v27->as_VMReg() ); - reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()); - reg_def V28 ( SOC, SOC, Op_RegF, 28, v28->as_VMReg() ); - 
reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()); - reg_def V29 ( SOC, SOC, Op_RegF, 29, v29->as_VMReg() ); - reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()); - reg_def V30 ( SOC, SOC, Op_RegF, 30, v30->as_VMReg() ); - reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()); - reg_def V31 ( SOC, SOC, Op_RegF, 31, v31->as_VMReg() ); - reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()); + reg_def V0 ( SOC, SOC, Op_RegF, 0, v0->as_VMReg() ); + reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next() ); + reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) ); + reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) ); + + reg_def V1 ( SOC, SOC, Op_RegF, 1, v1->as_VMReg() ); + reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next() ); + reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) ); + reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) ); + + reg_def V2 ( SOC, SOC, Op_RegF, 2, v2->as_VMReg() ); + reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next() ); + reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) ); + reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) ); + + reg_def V3 ( SOC, SOC, Op_RegF, 3, v3->as_VMReg() ); + reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next() ); + reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) ); + reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) ); + + reg_def V4 ( SOC, SOC, Op_RegF, 4, v4->as_VMReg() ); + reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next() ); + reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) ); + reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) ); + + reg_def V5 ( SOC, SOC, Op_RegF, 5, v5->as_VMReg() ); + reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next() ); + reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) ); + reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) ); + + reg_def V6 ( SOC, SOC, Op_RegF, 6, v6->as_VMReg() ); + reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next() ); + reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) ); + reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) ); + + reg_def V7 ( SOC, SOC, Op_RegF, 7, v7->as_VMReg() ); + reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next() ); + reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) ); + reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) ); + + reg_def V8 ( SOC, SOC, Op_RegF, 8, v8->as_VMReg() ); + reg_def V8_H ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next() ); + reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) ); + reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) ); + + reg_def V9 ( SOC, SOC, Op_RegF, 9, v9->as_VMReg() ); + reg_def V9_H ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next() ); + reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) ); + reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) ); + + reg_def V10 ( SOC, SOC, Op_RegF, 10, v10->as_VMReg() ); + reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() ); + reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2)); + reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3)); + + reg_def V11 ( SOC, SOC, Op_RegF, 11, v11->as_VMReg() ); + reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() ); + reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2)); + reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3)); + + reg_def V12 ( SOC, SOC, Op_RegF, 12, v12->as_VMReg() 
); + reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() ); + reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2)); + reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3)); + + reg_def V13 ( SOC, SOC, Op_RegF, 13, v13->as_VMReg() ); + reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() ); + reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2)); + reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3)); + + reg_def V14 ( SOC, SOC, Op_RegF, 14, v14->as_VMReg() ); + reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() ); + reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2)); + reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3)); + + reg_def V15 ( SOC, SOC, Op_RegF, 15, v15->as_VMReg() ); + reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() ); + reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2)); + reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3)); + + reg_def V16 ( SOC, SOC, Op_RegF, 16, v16->as_VMReg() ); + reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() ); + reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2)); + reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3)); + + reg_def V17 ( SOC, SOC, Op_RegF, 17, v17->as_VMReg() ); + reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() ); + reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2)); + reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3)); + + reg_def V18 ( SOC, SOC, Op_RegF, 18, v18->as_VMReg() ); + reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() ); + reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2)); + reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3)); + + reg_def V19 ( SOC, SOC, Op_RegF, 19, v19->as_VMReg() ); + reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() ); + reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2)); + reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3)); + + reg_def V20 ( SOC, SOC, Op_RegF, 20, v20->as_VMReg() ); + reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() ); + reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2)); + reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3)); + + reg_def V21 ( SOC, SOC, Op_RegF, 21, v21->as_VMReg() ); + reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() ); + reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2)); + reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3)); + + reg_def V22 ( SOC, SOC, Op_RegF, 22, v22->as_VMReg() ); + reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() ); + reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2)); + reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3)); + + reg_def V23 ( SOC, SOC, Op_RegF, 23, v23->as_VMReg() ); + reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() ); + reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2)); + reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3)); + + reg_def V24 ( SOC, SOC, Op_RegF, 24, v24->as_VMReg() ); + reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() ); + reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2)); + reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3)); + + reg_def V25 ( SOC, SOC, Op_RegF, 25, v25->as_VMReg() ); + reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() ); + reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2)); + reg_def V25_K( SOC, 
SOC, Op_RegF, 25, v25->as_VMReg()->next(3)); + + reg_def V26 ( SOC, SOC, Op_RegF, 26, v26->as_VMReg() ); + reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() ); + reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2)); + reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3)); + + reg_def V27 ( SOC, SOC, Op_RegF, 27, v27->as_VMReg() ); + reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() ); + reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2)); + reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3)); + + reg_def V28 ( SOC, SOC, Op_RegF, 28, v28->as_VMReg() ); + reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() ); + reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2)); + reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3)); + + reg_def V29 ( SOC, SOC, Op_RegF, 29, v29->as_VMReg() ); + reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() ); + reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2)); + reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3)); + + reg_def V30 ( SOC, SOC, Op_RegF, 30, v30->as_VMReg() ); + reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() ); + reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2)); + reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3)); + + reg_def V31 ( SOC, SOC, Op_RegF, 31, v31->as_VMReg() ); + reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() ); + reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2)); + reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3)); // ---------------------------- // Special Registers @@ -291,42 +386,42 @@ alloc_class chunk1( // no save - V16, V16_H, - V17, V17_H, - V18, V18_H, - V19, V19_H, - V20, V20_H, - V21, V21_H, - V22, V22_H, - V23, V23_H, - V24, V24_H, - V25, V25_H, - V26, V26_H, - V27, V27_H, - V28, V28_H, - V29, V29_H, - V30, V30_H, - V31, V31_H, + V16, V16_H, V16_J, V16_K, + V17, V17_H, V17_J, V17_K, + V18, V18_H, V18_J, V18_K, + V19, V19_H, V19_J, V19_K, + V20, V20_H, V20_J, V20_K, + V21, V21_H, V21_J, V21_K, + V22, V22_H, V22_J, V22_K, + V23, V23_H, V23_J, V23_K, + V24, V24_H, V24_J, V24_K, + V25, V25_H, V25_J, V25_K, + V26, V26_H, V26_J, V26_K, + V27, V27_H, V27_J, V27_K, + V28, V28_H, V28_J, V28_K, + V29, V29_H, V29_J, V29_K, + V30, V30_H, V30_J, V30_K, + V31, V31_H, V31_J, V31_K, // arg registers - V0, V0_H, - V1, V1_H, - V2, V2_H, - V3, V3_H, - V4, V4_H, - V5, V5_H, - V6, V6_H, - V7, V7_H, + V0, V0_H, V0_J, V0_K, + V1, V1_H, V1_J, V1_K, + V2, V2_H, V2_J, V2_K, + V3, V3_H, V3_J, V3_K, + V4, V4_H, V4_J, V4_K, + V5, V5_H, V5_J, V5_K, + V6, V6_H, V6_J, V6_K, + V7, V7_H, V7_J, V7_K, // non-volatiles - V8, V8_H, - V9, V9_H, - V10, V10_H, - V11, V11_H, - V12, V12_H, - V13, V13_H, - V14, V14_H, - V15, V15_H, + V8, V8_H, V8_J, V8_K, + V9, V9_H, V9_J, V9_K, + V10, V10_H, V10_J, V10_K, + V11, V11_H, V11_J, V11_K, + V12, V12_H, V12_J, V12_K, + V13, V13_H, V13_J, V13_K, + V14, V14_H, V14_J, V14_K, + V15, V15_H, V15_J, V15_K, ); alloc_class chunk2(RFLAGS); @@ -770,6 +865,42 @@ V31, V31_H ); +// Class for all 128bit vector registers +reg_class vectorx_reg( + V0, V0_H, V0_J, V0_K, + V1, V1_H, V1_J, V1_K, + V2, V2_H, V2_J, V2_K, + V3, V3_H, V3_J, V3_K, + V4, V4_H, V4_J, V4_K, + V5, V5_H, V5_J, V5_K, + V6, V6_H, V6_J, V6_K, + V7, V7_H, V7_J, V7_K, + V8, V8_H, V8_J, V8_K, + V9, V9_H, V9_J, V9_K, + V10, V10_H, V10_J, V10_K, + V11, V11_H, V11_J, V11_K, + V12, V12_H, V12_J, V12_K, + V13, V13_H, V13_J, V13_K, + V14, V14_H, V14_J, V14_K, + V15, 
V15_H, V15_J, V15_K, + V16, V16_H, V16_J, V16_K, + V17, V17_H, V17_J, V17_K, + V18, V18_H, V18_J, V18_K, + V19, V19_H, V19_J, V19_K, + V20, V20_H, V20_J, V20_K, + V21, V21_H, V21_J, V21_K, + V22, V22_H, V22_J, V22_K, + V23, V23_H, V23_J, V23_K, + V24, V24_H, V24_J, V24_K, + V25, V25_H, V25_J, V25_K, + V26, V26_H, V26_J, V26_K, + V27, V27_H, V27_J, V27_K, + V28, V28_H, V28_J, V28_K, + V29, V29_H, V29_J, V29_K, + V30, V30_H, V30_J, V30_K, + V31, V31_H, V31_J, V31_K +); + // Class for 128 bit register v0 reg_class v0_reg( V0, V0_H @@ -1964,7 +2095,7 @@ } // we have 32 float register * 2 halves - if (reg < 60 + 64) { + if (reg < 60 + 128) { return rc_float; } @@ -2000,6 +2131,78 @@ return 0; // Self copy, no move. } + if (bottom_type()->isa_vect() != NULL) { + uint len = 4; + if (cbuf) { + MacroAssembler _masm(cbuf); + uint ireg = ideal_reg(); + assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity"); + assert(ireg == Op_VecX, "sanity"); + if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) { + // stack->stack + int src_offset = ra_->reg2offset(src_lo); + int dst_offset = ra_->reg2offset(dst_lo); + assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset"); + len = 8; + if (src_offset < 512) { + __ ldp(rscratch1, rscratch2, Address(sp, src_offset)); + } else { + __ ldr(rscratch1, Address(sp, src_offset)); + __ ldr(rscratch2, Address(sp, src_offset+4)); + len += 4; + } + if (dst_offset < 512) { + __ stp(rscratch1, rscratch2, Address(sp, dst_offset)); + } else { + __ str(rscratch1, Address(sp, dst_offset)); + __ str(rscratch2, Address(sp, dst_offset+4)); + len += 4; + } + } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) { + __ orr(as_FloatRegister(Matcher::_regEncode[dst_lo]), __ T16B, + as_FloatRegister(Matcher::_regEncode[src_lo]), + as_FloatRegister(Matcher::_regEncode[src_lo])); + } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) { + __ str(as_FloatRegister(Matcher::_regEncode[src_lo]), __ Q, + Address(sp, ra_->reg2offset(dst_lo))); + } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) { + __ ldr(as_FloatRegister(Matcher::_regEncode[dst_lo]), __ Q, + Address(sp, ra_->reg2offset(src_lo))); + } else { + ShouldNotReachHere(); + } + } else if (st) { + if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) { + // stack->stack + int src_offset = ra_->reg2offset(src_lo); + int dst_offset = ra_->reg2offset(dst_lo); + if (src_offset < 512) { + st->print("ldp rscratch1, rscratch2, [sp, #%d]", src_offset); + } else { + st->print("ldr rscratch1, [sp, #%d]", src_offset); + st->print("\nldr rscratch2, [sp, #%d]", src_offset+4); + } + if (dst_offset < 512) { + st->print("\nstp rscratch1, rscratch2, [sp, #%d]", dst_offset); + } else { + st->print("\nstr rscratch1, [sp, #%d]", dst_offset); + st->print("\nstr rscratch2, [sp, #%d]", dst_offset+4); + } + st->print("\t# vector spill, stack to stack"); + } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) { + st->print("mov %s, %s\t# vector spill, reg to reg", + Matcher::regName[dst_lo], Matcher::regName[src_lo]); + } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) { + st->print("str %s, [sp, #%d]\t# vector spill, reg to stack", + Matcher::regName[src_lo], ra_->reg2offset(dst_lo)); + } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) { + st->print("ldr %s, [sp, #%d]\t# vector spill, stack to reg", + Matcher::regName[dst_lo], ra_->reg2offset(src_lo)); + } + } + return len; + } + switch (src_lo_rc) { case rc_int: if (dst_lo_rc == rc_int) { // gpr --> gpr copy @@ -2422,8 +2625,12 @@ // Vector width in 
bytes. const int Matcher::vector_width_in_bytes(BasicType bt) { - // TODO fixme - return 0; + int size = MIN2(16,(int)MaxVectorSize); + // Minimum 2 values in vector + if (size < 2*type2aelembytes(bt)) size = 0; + // But never < 4 + if (size < 4) size = 0; + return size; } // Limits on vector size (number of elements) loaded into vector. @@ -2431,22 +2638,19 @@ return vector_width_in_bytes(bt)/type2aelembytes(bt); } const int Matcher::min_vector_size(const BasicType bt) { - int max_size = max_vector_size(bt); - // Min size which can be loaded into vector is 4 bytes. - int size = (type2aelembytes(bt) == 1) ? 4 : 2; - return MIN2(size,max_size); + //return (type2aelembytes(bt) == 1) ? 4 : 2; + // For the moment, only support 1 vector size, 128 bits + return max_vector_size(bt); } // Vector ideal reg. const int Matcher::vector_ideal_reg(int len) { - // TODO fixme - return Op_RegD; + return Op_VecX; } // Only lowest bits of xmm reg are used for vector shift count. const int Matcher::vector_shift_count_ideal_reg(int size) { - // TODO fixme - return Op_RegL; + return Op_VecX; } // AES support not yet implemented @@ -2657,6 +2861,8 @@ typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr); typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr); +typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, + MacroAssembler::SIMD_RegVariant T, const Address &adr); // Used for all non-volatile memory accesses. The use of // $mem->opcode() to discover whether this pattern uses sign-extended @@ -2724,6 +2930,18 @@ } } + static void loadStore(MacroAssembler masm, mem_vector_insn insn, + FloatRegister reg, MacroAssembler::SIMD_RegVariant T, + int opcode, Register base, int index, int size, int disp) + { + if (index == -1) { + (masm.*insn)(reg, T, Address(base, disp)); + } else { + assert(disp == 0, "unsupported address mode"); + (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size))); + } + } + %} @@ -2855,6 +3073,24 @@ as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} + enc_class aarch64_enc_ldrvS(vecX dst, memory mem) %{ + FloatRegister dst_reg = as_FloatRegister($dst$$reg); + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S, + $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + %} + + enc_class aarch64_enc_ldrvD(vecX dst, memory mem) %{ + FloatRegister dst_reg = as_FloatRegister($dst$$reg); + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D, + $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + %} + + enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{ + FloatRegister dst_reg = as_FloatRegister($dst$$reg); + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q, + $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + %} + enc_class aarch64_enc_strb(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(), @@ -2923,6 +3159,24 @@ as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} + enc_class aarch64_enc_strvS(vecX src, memory mem) %{ + FloatRegister src_reg = as_FloatRegister($src$$reg); + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S, + $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + %} + + enc_class aarch64_enc_strvD(vecX src, memory mem) %{ + 
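  // Editorial note (not part of this changeset): the ldrvS/ldrvD/ldrvQ and
  // strvS/strvD/strvQ enc_classes differ only in the SIMD_RegVariant they
  // pass to loadStore(), which selects a 32-, 64- or 128-bit transfer; the
  // vector load/store instructs further below choose the variant from
  // n->as_LoadVector()->memory_size() (4, 8 or 16 bytes). Illustrative
  // dispatch only, using a hypothetical helper name:
  //   MacroAssembler::SIMD_RegVariant variant_for(int memory_size) {
  //     switch (memory_size) {
  //       case 4:  return MacroAssembler::S;
  //       case 8:  return MacroAssembler::D;
  //       case 16: return MacroAssembler::Q;
  //       default: ShouldNotReachHere(); return MacroAssembler::Q;
  //     }
  //   }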
FloatRegister src_reg = as_FloatRegister($src$$reg); + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D, + $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + %} + + enc_class aarch64_enc_strvQ(vecX src, memory mem) %{ + FloatRegister src_reg = as_FloatRegister($src$$reg); + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q, + $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); + %} + // END Non-volatile memory access // volatile loads and stores @@ -4933,6 +5187,16 @@ interface(REG_INTER); %} +operand vecX() +%{ + constraint(ALLOC_IN_RC(vectorx_reg)); + match(VecX); + + op_cost(0); + format %{ %} + interface(REG_INTER); +%} + operand vRegD_V0() %{ constraint(ALLOC_IN_RC(v0_reg)); @@ -5505,6 +5769,7 @@ interface(REG_INTER) %} +opclass vmem(indirect, indIndex, indOffI, indOffL); //----------OPERAND CLASSES---------------------------------------------------- // Operand Classes are groups of operands that are used as to simplify @@ -12926,7 +13191,919 @@ ins_pipe(pipe_class_empty); %} - +// ====================VECTOR INSTRUCTIONS===================================== + +// Load vector (32 bits) +instruct loadV4(vecX dst, vmem mem) +%{ + predicate(n->as_LoadVector()->memory_size() == 4); + match(Set dst (LoadVector mem)); + ins_cost(4 * INSN_COST); + format %{ "ldrs $dst,$mem\t# vector (32 bits)" %} + ins_encode( aarch64_enc_ldrvS(dst, mem) ); + ins_pipe(pipe_class_memory); +%} + +// Load vector (64 bits) +instruct loadV8(vecX dst, vmem mem) +%{ + predicate(n->as_LoadVector()->memory_size() == 8); + match(Set dst (LoadVector mem)); + ins_cost(4 * INSN_COST); + format %{ "ldrd $dst,$mem\t# vector (64 bits)" %} + ins_encode( aarch64_enc_ldrvD(dst, mem) ); + ins_pipe(pipe_class_memory); +%} + +// Load Vector (128 bits) +instruct loadV16(vecX dst, vmem mem) +%{ + predicate(n->as_LoadVector()->memory_size() == 16); + match(Set dst (LoadVector mem)); + ins_cost(4 * INSN_COST); + format %{ "ldrq $dst,$mem\t# vector (128 bits)" %} + ins_encode( aarch64_enc_ldrvQ(dst, mem) ); + ins_pipe(pipe_class_memory); +%} + +// Store Vector (32 bits) +instruct storeV4(vecX src, vmem mem) +%{ + predicate(n->as_StoreVector()->memory_size() == 4); + match(Set mem (StoreVector mem src)); + ins_cost(4 * INSN_COST); + format %{ "strs $mem,$src\t# vector (32 bits)" %} + ins_encode( aarch64_enc_strvS(src, mem) ); + ins_pipe(pipe_class_memory); +%} + +// Store Vector (64 bits) +instruct storeV8(vecX src, vmem mem) +%{ + predicate(n->as_StoreVector()->memory_size() == 8); + match(Set mem (StoreVector mem src)); + ins_cost(4 * INSN_COST); + format %{ "strd $mem,$src\t# vector (64 bits)" %} + ins_encode( aarch64_enc_strvD(src, mem) ); + ins_pipe(pipe_class_memory); +%} + +// Store Vector (128 bits) +instruct storeV16(vecX src, vmem mem) +%{ + predicate(n->as_StoreVector()->memory_size() == 16); + match(Set mem (StoreVector mem src)); + ins_cost(4 * INSN_COST); + format %{ "strq $mem,$src\t# vector (128 bits)" %} + ins_encode( aarch64_enc_strvQ(src, mem) ); + ins_pipe(pipe_class_memory); +%} + +instruct replicate16B(vecX dst, iRegIorL2I src) +%{ + match(Set dst (ReplicateB src)); + ins_cost(INSN_COST); + format %{ "dup $dst, $src\t# vector (16B)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate16B_imm(vecX dst, immI con) +%{ + match(Set dst (ReplicateB con)); + ins_cost(INSN_COST); + format %{ "movi 
$dst, $con\t# vector(16B)" %} + ins_encode %{ + __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate8S(vecX dst, iRegIorL2I src) +%{ + match(Set dst (ReplicateS src)); + ins_cost(INSN_COST); + format %{ "dup $dst, $src\t# vector (8S)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate8S_imm(vecX dst, immI con) +%{ + match(Set dst (ReplicateS con)); + ins_cost(INSN_COST); + format %{ "movi $dst, $con\t# vector(8H)" %} + ins_encode %{ + __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate4I(vecX dst, iRegIorL2I src) +%{ + match(Set dst (ReplicateI src)); + ins_cost(INSN_COST); + format %{ "dup $dst, $src\t# vector (4I)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate4I_imm(vecX dst, immI con) +%{ + match(Set dst (ReplicateI con)); + ins_cost(INSN_COST); + format %{ "movi $dst, $con\t# vector(4I)" %} + ins_encode %{ + __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate2L(vecX dst, iRegL src) +%{ + match(Set dst (ReplicateL src)); + ins_cost(INSN_COST); + format %{ "dup $dst, $src\t# vector (2L)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate2L_zero(vecX dst, immI0 zero) +%{ + match(Set dst (ReplicateI zero)); + ins_cost(INSN_COST); + format %{ "movi $dst, $zero\t# vector(4I)" %} + ins_encode %{ + __ eor(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate4F(vecX dst, vRegF src) +%{ + match(Set dst (ReplicateF src)); + ins_cost(INSN_COST); + format %{ "dup $dst, $src\t# vector (4F)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct replicate2D(vecX dst, vRegD src) +%{ + match(Set dst (ReplicateD src)); + ins_cost(INSN_COST); + format %{ "dup $dst, $src\t# vector (2D)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// ====================REDUCTION ARITHMETIC==================================== + +instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2) +%{ + match(Set dst (AddReductionVI src1 src2)); + ins_cost(INSN_COST); + effect(TEMP tmp, TEMP tmp2); + format %{ "addv $tmp, T4S, $src2\n\t" + "umov $tmp2, $tmp, S, 0\n\t" + "addw $dst, $tmp2, $src1\t add reduction4i" + %} + ins_encode %{ + __ addv(as_FloatRegister($tmp$$reg), __ T4S, + as_FloatRegister($src2$$reg)); + __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0); + __ addw($dst$$Register, $tmp2$$Register, $src1$$Register); + %} + ins_pipe(pipe_class_default); +%} + +instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2) +%{ + match(Set dst (MulReductionVI src1 src2)); + ins_cost(INSN_COST); + effect(TEMP tmp, TEMP tmp2, TEMP dst); + format %{ "ins $tmp, $src2, 0, 1\n\t" + "mul $tmp, $tmp, $src2\n\t" + "umov $tmp2, $tmp, S, 0\n\t" + "mul $dst, $tmp2, $src1\n\t" + "umov $tmp2, $tmp, S, 1\n\t" + "mul $dst, $tmp2, $dst\t mul reduction4i\n\t" + %} + 
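  // Editorial note (not part of this changeset): the sequence above folds the
  // four int lanes of $src2 into $dst as src1 * (v[0]*v[2]) * (v[1]*v[3]):
  // ins copies the high 64 bits of $src2 down into $tmp, the 2S mulv
  // multiplies the lane pairs, and the two umov/mul steps combine the partial
  // products with $src1. Scalar sketch of the same dataflow (illustration
  // only):
  //   int mul_reduction_4i(int src1, const int v[4]) {
  //     int lo = v[0] * v[2];
  //     int hi = v[1] * v[3];
  //     return src1 * lo * hi;   // same value as src1 * v[0]*v[1]*v[2]*v[3]
  //   }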
ins_encode %{ + __ ins(as_FloatRegister($tmp$$reg), __ D, + as_FloatRegister($src2$$reg), 0, 1); + __ mulv(as_FloatRegister($tmp$$reg), __ T2S, + as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg)); + __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0); + __ mul($dst$$Register, $tmp2$$Register, $src1$$Register); + __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1); + __ mul($dst$$Register, $tmp2$$Register, $dst$$Register); + %} + ins_pipe(pipe_class_default); +%} + +instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp) +%{ + match(Set dst (AddReductionVF src1 src2)); + ins_cost(INSN_COST); + effect(TEMP tmp, TEMP dst); + format %{ "fadds $dst, $src1, $src2\n\t" + "ins $tmp, S, $src2, 0, 1\n\t" + "fadds $dst, $dst, $tmp\n\t" + "ins $tmp, S, $src2, 0, 2\n\t" + "fadds $dst, $dst, $tmp\n\t" + "ins $tmp, S, $src2, 0, 3\n\t" + "fadds $dst, $dst, $tmp\t add reduction4f" + %} + ins_encode %{ + __ fadds(as_FloatRegister($dst$$reg), + as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ S, + as_FloatRegister($src2$$reg), 0, 1); + __ fadds(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ S, + as_FloatRegister($src2$$reg), 0, 2); + __ fadds(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ S, + as_FloatRegister($src2$$reg), 0, 3); + __ fadds(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp) +%{ + match(Set dst (MulReductionVF src1 src2)); + ins_cost(INSN_COST); + effect(TEMP tmp, TEMP dst); + format %{ "fmuls $dst, $src1, $src2\n\t" + "ins $tmp, S, $src2, 0, 1\n\t" + "fmuls $dst, $dst, $tmp\n\t" + "ins $tmp, S, $src2, 0, 2\n\t" + "fmuls $dst, $dst, $tmp\n\t" + "ins $tmp, S, $src2, 0, 3\n\t" + "fmuls $dst, $dst, $tmp\t add reduction4f" + %} + ins_encode %{ + __ fmuls(as_FloatRegister($dst$$reg), + as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ S, + as_FloatRegister($src2$$reg), 0, 1); + __ fmuls(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ S, + as_FloatRegister($src2$$reg), 0, 2); + __ fmuls(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ S, + as_FloatRegister($src2$$reg), 0, 3); + __ fmuls(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) +%{ + match(Set dst (AddReductionVD src1 src2)); + ins_cost(INSN_COST); + effect(TEMP tmp, TEMP dst); + format %{ "faddd $dst, $src1, $src2\n\t" + "ins $tmp, D, $src2, 0, 1\n\t" + "faddd $dst, $dst, $tmp\t add reduction2d" + %} + ins_encode %{ + __ faddd(as_FloatRegister($dst$$reg), + as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ D, + as_FloatRegister($src2$$reg), 0, 1); + __ faddd(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) +%{ + match(Set dst (MulReductionVD src1 src2)); + ins_cost(INSN_COST); 
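  // Editorial note (not part of this changeset): the FP reductions here
  // (reduce_add4F, reduce_mul4F, reduce_add2D and this rule) deliberately use
  // a chain of scalar fadds/fmuls/faddd/fmuld over lane 0, 1, 2, 3 instead of
  // a pairwise vector reduction: FP arithmetic is not associative, and the
  // reduction is expected to produce the same value as the sequential Java
  // loop, i.e. for AddReductionVF:
  //   float r = src1;
  //   for (int i = 0; i < 4; i++) r += v[i];   // v[] = the four lanes of src2,
  //                                             // accumulated in this exact order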
+ effect(TEMP tmp, TEMP dst); + format %{ "fmuld $dst, $src1, $src2\n\t" + "ins $tmp, D, $src2, 0, 1\n\t" + "fmuld $dst, $dst, $tmp\t add reduction2d" + %} + ins_encode %{ + __ fmuld(as_FloatRegister($dst$$reg), + as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); + __ ins(as_FloatRegister($tmp$$reg), __ D, + as_FloatRegister($src2$$reg), 0, 1); + __ fmuld(as_FloatRegister($dst$$reg), + as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// ====================VECTOR ARITHMETIC======================================= + +// --------------------------------- ADD -------------------------------------- + +instruct vadd16B(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (AddVB src1 src2)); + ins_cost(INSN_COST); + format %{ "addv $dst,$src1,$src2\t# vector (16B)" %} + ins_encode %{ + __ addv(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vadd8S(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (AddVS src1 src2)); + ins_cost(INSN_COST); + format %{ "addv $dst,$src1,$src2\t# vector (8H)" %} + ins_encode %{ + __ addv(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vadd4I(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (AddVI src1 src2)); + ins_cost(INSN_COST); + format %{ "addv $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ addv(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vadd2L(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (AddVL src1 src2)); + ins_cost(INSN_COST); + format %{ "addv $dst,$src1,$src2\t# vector (2L)" %} + ins_encode %{ + __ addv(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vadd4F(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (AddVF src1 src2)); + ins_cost(INSN_COST); + format %{ "fadd $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ fadd(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vadd2D(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (AddVD src1 src2)); + ins_cost(INSN_COST); + format %{ "fadd $dst,$src1,$src2\t# vector (2D)" %} + ins_encode %{ + __ fadd(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// --------------------------------- SUB -------------------------------------- + +instruct vsub16B(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (SubVB src1 src2)); + ins_cost(INSN_COST); + format %{ "subv $dst,$src1,$src2\t# vector (16B)" %} + ins_encode %{ + __ subv(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsub8S(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (SubVS src1 src2)); + ins_cost(INSN_COST); + format %{ "subv $dst,$src1,$src2\t# vector (8H)" %} + ins_encode %{ + __ subv(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsub4I(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (SubVI src1 src2)); + 
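  // Editorial note (not part of this changeset): these element-wise
  // AddV*/SubV* rules (and the MulV*/DivV* rules below) match the vector
  // nodes that C2's SuperWord pass builds for simple counted loops, e.g.
  //   for (int i = 0; i < n; i++) c[i] = a[i] - b[i];
  // which, with the 128-bit vecX operand, is handled four ints at a time by
  // a single "subv ... T4S" per vectorized iteration.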
ins_cost(INSN_COST); + format %{ "subv $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ subv(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsub2L(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (SubVL src1 src2)); + ins_cost(INSN_COST); + format %{ "subv $dst,$src1,$src2\t# vector (2L)" %} + ins_encode %{ + __ subv(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsub4F(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (SubVF src1 src2)); + ins_cost(INSN_COST); + format %{ "fsub $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ fsub(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsub2D(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (SubVD src1 src2)); + ins_cost(INSN_COST); + format %{ "fsub $dst,$src1,$src2\t# vector (2D)" %} + ins_encode %{ + __ fsub(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// --------------------------------- MUL -------------------------------------- + +instruct vmul8S(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (MulVS src1 src2)); + ins_cost(INSN_COST); + format %{ "mulv $dst,$src1,$src2\t# vector (8H)" %} + ins_encode %{ + __ mulv(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vmul4I(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (MulVI src1 src2)); + ins_cost(INSN_COST); + format %{ "mulv $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ mulv(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vmul4F(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (MulVF src1 src2)); + ins_cost(INSN_COST); + format %{ "fmul $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ fmul(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vmul2D(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (MulVD src1 src2)); + ins_cost(INSN_COST); + format %{ "fmul $dst,$src1,$src2\t# vector (2D)" %} + ins_encode %{ + __ fmul(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// --------------------------------- DIV -------------------------------------- + +instruct vdiv4F(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (DivVF src1 src2)); + ins_cost(INSN_COST); + format %{ "fdiv $dst,$src1,$src2\t# vector (4S)" %} + ins_encode %{ + __ fdiv(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vdiv2D(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (DivVD src1 src2)); + ins_cost(INSN_COST); + format %{ "fdiv $dst,$src1,$src2\t# vector (2D)" %} + ins_encode %{ + __ fdiv(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// --------------------------------- AND -------------------------------------- + +instruct vand16B(vecX 
dst, vecX src1, vecX src2) +%{ + match(Set dst (AndV src1 src2)); + ins_cost(INSN_COST); + format %{ "and $dst,$src1,$src2\t# vector (16B)" %} + ins_encode %{ + __ andr(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// --------------------------------- OR --------------------------------------- + +instruct vor16B(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (OrV src1 src2)); + ins_cost(INSN_COST); + format %{ "orr $dst,$src1,$src2\t# vector (16B)" %} + ins_encode %{ + __ orr(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// --------------------------------- XOR -------------------------------------- + +instruct vxor16B(vecX dst, vecX src1, vecX src2) +%{ + match(Set dst (XorV src1 src2)); + ins_cost(INSN_COST); + format %{ "xor $dst,$src1,$src2\t# vector (16B)" %} + ins_encode %{ + __ eor(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src1$$reg), + as_FloatRegister($src2$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// ------------------------------ Shift --------------------------------------- + +instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{ + match(Set dst (LShiftCntV cnt)); + format %{ "dup $dst, $cnt\t# shift count (vecX)" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount +instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{ + match(Set dst (RShiftCntV cnt)); + format %{ "dup $dst, $cnt\t# shift count (vecX)\n\tneg $dst, $dst\t T16B" %} + ins_encode %{ + __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg)); + __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll16B(vecX dst, vecX src, vecX shift) %{ + match(Set dst (LShiftVB src shift)); + match(Set dst (RShiftVB src shift)); + ins_cost(INSN_COST); + format %{ "sshl $dst,$src,$shift\t# vector (16B)" %} + ins_encode %{ + __ sshl(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl16B(vecX dst, vecX src, vecX shift) %{ + match(Set dst (URShiftVB src shift)); + ins_cost(INSN_COST); + format %{ "ushl $dst,$src,$shift\t# vector (16B)" %} + ins_encode %{ + __ ushl(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (LShiftVB src shift)); + ins_cost(INSN_COST); + format %{ "shl $dst, $src, $shift\t# vector (16B)" %} + ins_encode %{ + int sh = (int)$shift$$constant & 31; + if (sh >= 8) { + __ eor(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), + as_FloatRegister($src$$reg)); + } else { + __ shl(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), sh); + } + %} + ins_pipe(pipe_class_default); +%} + +instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (RShiftVB src shift)); + ins_cost(INSN_COST); + format %{ "sshr $dst, $src, $shift\t# vector (16B)" %} + ins_encode %{ + int sh = (int)$shift$$constant & 31; + if (sh >= 8) sh = 7; + sh = -sh & 7; + __ sshr(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), sh); + %} + 
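  // Editorial note (not part of this changeset): the immediate right shifts
  // clamp and negate the count because Java only masks shift counts to 0..31
  // while a byte lane is 8 bits wide, and because sshr/ushr encode the shift
  // amount as (2 * esize - shift) in immh:immb. Worked example for sshr,
  // T16B, shift constant 3:
  //   sh = 3 & 31 = 3;  sh < 8, so sh = -3 & 7 = 5
  //   Assembler::sshr ORs in the element-size bit: immh:immb = 8 | 5 = 13
  //   hardware shift = 2 * 8 - 13 = 3   // the requested arithmetic shift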
ins_pipe(pipe_class_default); +%} + +instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (URShiftVB src shift)); + ins_cost(INSN_COST); + format %{ "ushr $dst, $src, $shift\t# vector (16B)" %} + ins_encode %{ + int sh = (int)$shift$$constant & 31; + if (sh >= 8) { + __ eor(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), + as_FloatRegister($src$$reg)); + } else { + __ ushr(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), -sh & 7); + } + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll8S(vecX dst, vecX src, vecX shift) %{ + match(Set dst (LShiftVS src shift)); + match(Set dst (RShiftVS src shift)); + ins_cost(INSN_COST); + format %{ "sshl $dst,$src,$shift\t# vector (8H)" %} + ins_encode %{ + __ sshl(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl8S(vecX dst, vecX src, vecX shift) %{ + match(Set dst (URShiftVS src shift)); + ins_cost(INSN_COST); + format %{ "ushl $dst,$src,$shift\t# vector (8H)" %} + ins_encode %{ + __ ushl(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (LShiftVS src shift)); + ins_cost(INSN_COST); + format %{ "shl $dst, $src, $shift\t# vector (8H)" %} + ins_encode %{ + int sh = (int)$shift$$constant & 31; + if (sh >= 16) { + __ eor(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), + as_FloatRegister($src$$reg)); + } else { + __ shl(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src$$reg), sh); + } + %} + ins_pipe(pipe_class_default); +%} + +instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (RShiftVS src shift)); + ins_cost(INSN_COST); + format %{ "sshr $dst, $src, $shift\t# vector (8H)" %} + ins_encode %{ + int sh = (int)$shift$$constant & 31; + if (sh >= 16) sh = 15; + sh = -sh & 15; + __ sshr(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src$$reg), sh); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (URShiftVS src shift)); + ins_cost(INSN_COST); + format %{ "ushr $dst, $src, $shift\t# vector (8H)" %} + ins_encode %{ + int sh = (int)$shift$$constant & 31; + if (sh >= 16) { + __ eor(as_FloatRegister($dst$$reg), __ T16B, + as_FloatRegister($src$$reg), + as_FloatRegister($src$$reg)); + } else { + __ ushr(as_FloatRegister($dst$$reg), __ T8H, + as_FloatRegister($src$$reg), -sh & 15); + } + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll4I(vecX dst, vecX src, vecX shift) %{ + match(Set dst (LShiftVI src shift)); + match(Set dst (RShiftVI src shift)); + ins_cost(INSN_COST); + format %{ "sshl $dst,$src,$shift\t# vector (4S)" %} + ins_encode %{ + __ sshl(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl4I(vecX dst, vecX src, vecX shift) %{ + match(Set dst (URShiftVI src shift)); + ins_cost(INSN_COST); + format %{ "ushl $dst,$src,$shift\t# vector (4S)" %} + ins_encode %{ + __ ushl(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (LShiftVI src shift)); + ins_cost(INSN_COST); + format %{ "shl $dst, $src, $shift\t# vector (4S)" %} + 
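  // Editorial note (not part of this changeset): unlike the 16B/8H immediate
  // shifts above, the 4S and 2D forms pass the constant straight through
  // (masked with 31 or 63). Java already reduces int/long shift counts to
  // 0..31 / 0..63, which never reaches the 32/64-bit lane width, so the
  // "shift >= element size" special case handled for byte and short lanes
  // cannot arise here.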
ins_encode %{ + __ shl(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src$$reg), + (int)$shift$$constant & 31); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (RShiftVI src shift)); + ins_cost(INSN_COST); + format %{ "sshr $dst, $src, $shift\t# vector (4S)" %} + ins_encode %{ + __ sshr(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src$$reg), + -(int)$shift$$constant & 31); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (URShiftVI src shift)); + ins_cost(INSN_COST); + format %{ "ushr $dst, $src, $shift\t# vector (4S)" %} + ins_encode %{ + __ ushr(as_FloatRegister($dst$$reg), __ T4S, + as_FloatRegister($src$$reg), + -(int)$shift$$constant & 31); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll2L(vecX dst, vecX src, vecX shift) %{ + match(Set dst (LShiftVL src shift)); + match(Set dst (RShiftVL src shift)); + ins_cost(INSN_COST); + format %{ "sshl $dst,$src,$shift\t# vector (2D)" %} + ins_encode %{ + __ sshl(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl2L(vecX dst, vecX src, vecX shift) %{ + match(Set dst (URShiftVL src shift)); + ins_cost(INSN_COST); + format %{ "ushl $dst,$src,$shift\t# vector (2D)" %} + ins_encode %{ + __ ushl(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src$$reg), + as_FloatRegister($shift$$reg)); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (LShiftVL src shift)); + ins_cost(INSN_COST); + format %{ "shl $dst, $src, $shift\t# vector (2D)" %} + ins_encode %{ + __ shl(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src$$reg), + (int)$shift$$constant & 63); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (RShiftVL src shift)); + ins_cost(INSN_COST); + format %{ "sshr $dst, $src, $shift\t# vector (2D)" %} + ins_encode %{ + __ sshr(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src$$reg), + -(int)$shift$$constant & 63); + %} + ins_pipe(pipe_class_default); +%} + +instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{ + match(Set dst (URShiftVL src shift)); + ins_cost(INSN_COST); + format %{ "ushr $dst, $src, $shift\t# vector (2D)" %} + ins_encode %{ + __ ushr(as_FloatRegister($dst$$reg), __ T2D, + as_FloatRegister($src$$reg), + -(int)$shift$$constant & 63); + %} + ins_pipe(pipe_class_default); +%} //----------PEEPHOLE RULES----------------------------------------------------- // These must follow all instruction definitions as they use the names diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -466,6 +466,11 @@ case base_plus_offset: { unsigned size = i->get(31, 30); + if (i->get(26, 26) && i->get(23, 23)) { + // SIMD Q Type - Size = 128 bits + assert(size == 0, "bad size"); + size = 0b100; + } unsigned mask = (1 << size) - 1; if (_offset < 0 || _offset & mask) { @@ -1888,9 +1893,18 @@ }; enum SIMD_RegVariant { - S32, D64, Q128 + B, H, S, D, Q }; +#define INSN(NAME, op) \ + void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) { \ + ld_st2((Register)Rt, adr, (int)T & 3, op + ((T==Q) ? 
0b10:0b00), 1); \ + } \ + + INSN(ldr, 1); + INSN(str, 0); + +#undef INSN private: @@ -1997,27 +2011,87 @@ rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0); \ } - INSN(eor, 0b101110001); - INSN(orr, 0b001110101); + INSN(eor, 0b101110001); + INSN(orr, 0b001110101); INSN(andr, 0b001110001); - INSN(bic, 0b001110011); - INSN(bif, 0b101110111); - INSN(bit, 0b101110101); - INSN(bsl, 0b101110011); - INSN(orn, 0b001110111); + INSN(bic, 0b001110011); + INSN(bif, 0b101110111); + INSN(bit, 0b101110101); + INSN(bsl, 0b101110011); + INSN(orn, 0b001110111); + +#undef INSN + +#define INSN(NAME, opc, opc2) \ + void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ + starti; \ + f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \ + f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(opc2, 15, 10); \ + rf(Vn, 5), rf(Vd, 0); \ + } + + INSN(addv, 0, 0b100001); + INSN(subv, 1, 0b100001); + INSN(mulv, 0, 0b100111); + INSN(sshl, 0, 0b010001); + INSN(ushl, 1, 0b010001); #undef INSN -#define INSN(NAME, opc) \ - void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ +#define INSN(NAME, opc, opc2) \ + void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ starti; \ f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \ - f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(0b100001, 15, 10); \ + f((int)T >> 1, 23, 22), f(opc2, 21, 10); \ rf(Vn, 5), rf(Vd, 0); \ } - INSN(addv, 0); - INSN(subv, 1); + INSN(absr, 0, 0b100000101110); + INSN(negr, 1, 0b100000101110); + INSN(notr, 1, 0b100000010110); + INSN(addv, 0, 0b110001101110); + +#undef INSN + +#define INSN(NAME, op0, cmode0) \ + void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) { \ + unsigned cmode = cmode0; \ + unsigned op = op0; \ + starti; \ + assert(lsl == 0 || \ + ((T == T4H || T == T8H) && lsl == 8) || \ + ((T == T2S || T == T4S) && ((lsl >> 3) < 4)), "invalid shift"); \ + cmode |= lsl >> 2; \ + if (T == T4H || T == T8H) cmode |= 0b1000; \ + if (!(T == T4H || T == T8H || T == T2S || T == T4S)) { \ + assert(op == 0 && cmode0 == 0, "must be MOVI"); \ + cmode = 0b1110; \ + if (T == T1D || T == T2D) op = 1; \ + } \ + f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19); \ + f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5); \ + rf(Vd, 0); \ + } + + INSN(movi, 0, 0); + INSN(orri, 0, 1); + INSN(mvni, 1, 0); + INSN(bici, 1, 1); + +#undef INSN + +#define INSN(NAME, op1, op2, op3) \ + void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ + starti; \ + assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \ + f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01110, 28, 24), f(op2, 23); \ + f(T==T2D ? 
1:0, 22); f(1, 21), rf(Vm, 16), f(op3, 15, 10), rf(Vn, 5), rf(Vd, 0); \ + } + + INSN(fadd, 0, 0, 0b110101); + INSN(fdiv, 1, 0, 0b111111); + INSN(fmul, 1, 0, 0b110111); + INSN(fsub, 0, 1, 0b110101); #undef INSN @@ -2064,19 +2138,40 @@ #undef INSN - void shl(FloatRegister Vd, FloatRegister Vn, SIMD_Arrangement T, int shift){ + void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) { + starti; + assert(T != Q, "invalid register variant"); + f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15); + f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0); + } + + void umov(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { starti; - /* The encodings for the immh:immb fields (bits 22:16) are - * 0001 xxx 8B/16B, shift = xxx - * 001x xxx 4H/8H, shift = xxxx - * 01xx xxx 2S/4S, shift = xxxxx - * 1xxx xxx 1D/2D, shift = xxxxxx (1D is RESERVED) - */ - assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); - f(0, 31), f(T & 1, 30), f(0b0011110, 29, 23), f((1 << ((T>>1)+3))|shift, 22, 16); - f(0b010101, 15, 10), rf(Vn, 5), rf(Vd, 0); + f(0, 31), f(T==D ? 1:0, 30), f(0b001110000, 29, 21); + f(((idx<<1)|1)<<(int)T, 20, 16), f(0b001111, 15, 10); + rf(Vn, 5), rf(Rd, 0); } +#define INSN(NAME, opc, opc2) \ + void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \ + starti; \ + /* The encodings for the immh:immb fields (bits 22:16) are \ + * 0001 xxx 8B/16B, shift = xxx \ + * 001x xxx 4H/8H, shift = xxxx \ + * 01xx xxx 2S/4S, shift = xxxxx \ + * 1xxx xxx 1D/2D, shift = xxxxxx (1D is RESERVED) \ + */ \ + assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \ + f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23), \ + f((1 << ((T>>1)+3))|shift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \ + } + + INSN(shl, 0, 0b010101); + INSN(sshr, 0, 0b000001); + INSN(ushr, 1, 0b000001); + +#undef INSN + void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { starti; /* The encodings for the immh:immb fields (bits 22:16) are @@ -2149,6 +2244,23 @@ rf(Vn, 5), rf(Vd, 0); } + void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs) + { + starti; + assert(T != T1D, "reserved encoding"); + f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21); + f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), rf(Xs, 5), rf(Vd, 0); + } + + void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0) + { + starti; + assert(T != T1D, "reserved encoding"); + f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21); + f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); + f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0); + } + // CRC32 instructions #define INSN(NAME, sf, sz) \ void NAME(Register Rd, Register Rn, Register Rm) { \ diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -64,7 +64,7 @@ define_pd_global(intx, PreInflateSpin, 10); define_pd_global(bool, RewriteBytecodes, true); -define_pd_global(bool, RewriteFrequentPairs, false); +define_pd_global(bool, RewriteFrequentPairs, true); define_pd_global(bool, UseMembar, true); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Wed Jul 05 20:37:12 2017 +0200 @@ 
-2802,8 +2802,8 @@ uzp2(v21, v20, v16, T2D); eor(v20, T16B, v17, v21); - shl(v16, v28, T2D, 1); - shl(v17, v20, T2D, 1); + shl(v16, T2D, v28, 1); + shl(v17, T2D, v20, 1); eor(v0, T16B, v0, v16); eor(v1, T16B, v1, v17); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -37,6 +37,7 @@ friend class LIR_Assembler; using Assembler::mov; + using Assembler::movi; protected: @@ -464,6 +465,45 @@ void movptr(Register r, uintptr_t imm64); + // Macro to mov replicated immediate to vector register. + // Where imm32 == hex abcdefgh, Vd will get the following values + // for different arrangements in T + // T8B: Vd = ghghghghghghghgh + // T16B: Vd = ghghghghghghghghghghghghghghghgh + // T4H: Vd = efghefghefghefgh + // T8H: Vd = efghefghefghefghefghefghefghefgh + // T2S: Vd = abcdefghabcdefgh + // T4S: Vd = abcdefghabcdefghabcdefghabcdefgh + // T1D/T2D: invalid + void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) { + assert(T != T1D && T != T2D, "invalid arrangement"); + u_int32_t nimm32 = ~imm32; + if (T == T8B || T == T16B) { imm32 &= 0xff; nimm32 &= 0xff; } + if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; } + u_int32_t x = imm32; + int movi_cnt = 0; + int movn_cnt = 0; + while (x) { if (x & 0xff) movi_cnt++; x >>= 8; } + x = nimm32; + while (x) { if (x & 0xff) movn_cnt++; x >>= 8; } + if (movn_cnt < movi_cnt) imm32 = nimm32; + unsigned lsl = 0; + while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; } + if (movn_cnt < movi_cnt) + mvni(Vd, T, imm32 & 0xff, lsl); + else + movi(Vd, T, imm32 & 0xff, lsl); + imm32 >>= 8; lsl += 8; + while (imm32) { + while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; } + if (movn_cnt < movi_cnt) + bici(Vd, T, imm32 & 0xff, lsl); + else + orri(Vd, T, imm32 & 0xff, lsl); + lsl += 8; imm32 >>= 8; + } + } + // macro instructions for accessing and updating floating point // status register // diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/register_aarch64.hpp --- a/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -186,7 +186,7 @@ // it's optoregs. number_of_registers = (2 * RegisterImpl::number_of_registers + - 2 * FloatRegisterImpl::number_of_registers + + 4 * FloatRegisterImpl::number_of_registers + 1) // flags }; diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp --- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -513,23 +513,61 @@ void TemplateTable::iload_internal(RewriteControl rc) { transition(vtos, itos); if (RewriteFrequentPairs && rc == may_rewrite) { - // TODO : check x86 code for what to do here - __ call_Unimplemented(); - } else { - locals_index(r1); - __ ldr(r0, iaddress(r1)); + Label rewrite, done; + Register bc = r4; + + // get next bytecode + __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload))); + + // if _iload, wait to rewrite to iload2. We only want to rewrite the + // last two iloads in a pair. Comparing against fast_iload means that + // the next bytecode is neither an iload or a caload, and therefore + // an iload pair. 
+ __ cmpw(r1, Bytecodes::_iload); + __ br(Assembler::EQ, done); + + // if _fast_iload rewrite to _fast_iload2 + __ cmpw(r1, Bytecodes::_fast_iload); + __ movw(bc, Bytecodes::_fast_iload2); + __ br(Assembler::EQ, rewrite); + + // if _caload rewrite to _fast_icaload + __ cmpw(r1, Bytecodes::_caload); + __ movw(bc, Bytecodes::_fast_icaload); + __ br(Assembler::EQ, rewrite); + + // else rewrite to _fast_iload + __ movw(bc, Bytecodes::_fast_iload); + + // rewrite + // bc: new bytecode + __ bind(rewrite); + patch_bytecode(Bytecodes::_iload, bc, r1, false); + __ bind(done); + } + // do iload, get the local value into tos + locals_index(r1); + __ ldr(r0, iaddress(r1)); + } void TemplateTable::fast_iload2() { - __ call_Unimplemented(); + transition(vtos, itos); + locals_index(r1); + __ ldr(r0, iaddress(r1)); + __ push(itos); + locals_index(r1, 3); + __ ldr(r0, iaddress(r1)); } void TemplateTable::fast_iload() { - __ call_Unimplemented(); + transition(vtos, itos); + locals_index(r1); + __ ldr(r0, iaddress(r1)); } void TemplateTable::lload() @@ -721,7 +759,18 @@ // iload followed by caload frequent pair void TemplateTable::fast_icaload() { - __ call_Unimplemented(); + transition(vtos, itos); + // load index out of locals + locals_index(r2); + __ ldr(r1, iaddress(r2)); + + __ pop_ptr(r0); + + // r0: array + // r1: index + index_check(r0, r1); // leaves index in r1, kills rscratch1 + __ lea(r1, Address(r0, r1, Address::uxtw(1))); + __ load_unsigned_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_CHAR))); } void TemplateTable::saload() @@ -797,7 +846,47 @@ // These bytecodes with a small amount of code are most profitable // to rewrite if (RewriteFrequentPairs && rc == may_rewrite) { - __ call_Unimplemented(); + Label rewrite, done; + const Register bc = r4; + + // get next bytecode + __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); + + // do actual aload_0 + aload(0); + + // if _getfield then wait with rewrite + __ cmpw(r1, Bytecodes::Bytecodes::_getfield); + __ br(Assembler::EQ, done); + + // if _igetfield then rewrite to _fast_iaccess_0 + assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ cmpw(r1, Bytecodes::_fast_igetfield); + __ movw(bc, Bytecodes::_fast_iaccess_0); + __ br(Assembler::EQ, rewrite); + + // if _agetfield then rewrite to _fast_aaccess_0 + assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ cmpw(r1, Bytecodes::_fast_agetfield); + __ movw(bc, Bytecodes::_fast_aaccess_0); + __ br(Assembler::EQ, rewrite); + + // if _fgetfield then rewrite to _fast_faccess_0 + assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ cmpw(r1, Bytecodes::_fast_fgetfield); + __ movw(bc, Bytecodes::_fast_faccess_0); + __ br(Assembler::EQ, rewrite); + + // else rewrite to _fast_aload0 + assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition"); + __ movw(bc, Bytecodes::Bytecodes::_fast_aload_0); + + // rewrite + // bc: new bytecode + __ bind(rewrite); + patch_bytecode(Bytecodes::_aload_0, bc, r1, false); + + __ bind(done); } else { aload(0); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/os/windows/vm/os_windows.cpp --- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -3768,7 +3768,7 @@ return NULL; } -#define EXIT_TIMEOUT PRODUCT_ONLY(1000)
NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */ +#define EXIT_TIMEOUT 300000 /* 5 minutes */ static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/c1/c1_LIRGenerator.cpp --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -1469,7 +1469,9 @@ } else { guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); - flag_type = T_BYTE; + // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM, + // need to use unsigned instructions to use the large offset to load the satb_mark_queue. + flag_type = T_BOOLEAN; } LIR_Opr thrd = getThreadPointer(); LIR_Address* mark_active_flag_addr = diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp --- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP #include "gc/shared/genOopClosures.hpp" +#include "gc/shared/taskqueue.hpp" #include "memory/iterator.hpp" ///////////////////////////////////////////////////////////////// diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp --- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -641,6 +641,7 @@ class FreeListSpace_DCTOC : public Filtering_DCTOC { CompactibleFreeListSpace* _cfls; CMSCollector* _collector; + bool _parallel; protected: // Override. #define walk_mem_region_with_cl_DECL(ClosureType) \ @@ -661,9 +662,10 @@ CMSCollector* collector, ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary) : + HeapWord* boundary, + bool parallel) : Filtering_DCTOC(sp, cl, precision, boundary), - _cfls(sp), _collector(collector) {} + _cfls(sp), _collector(collector), _parallel(parallel) {} }; // We de-virtualize the block-related calls below, since we know that our @@ -674,10 +676,7 @@ HeapWord* bottom, \ HeapWord* top, \ ClosureType* cl) { \ - bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0; \ - if (is_par) { \ - assert(GenCollectedHeap::heap()->n_par_threads() == \ - GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \ + if (_parallel) { \ walk_mem_region_with_cl_par(mr, bottom, top, cl); \ } else { \ walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \ @@ -747,8 +746,9 @@ DirtyCardToOopClosure* CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary) { - return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary); + HeapWord* boundary, + bool parallel) { + return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel); } @@ -1897,11 +1897,9 @@ assert(chunk->is_free() && ffc->is_free(), "Error"); _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); if (rem_sz < SmallForDictionary) { - bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0); + // The freeList lock is held, but multiple GC task threads might be executing in parallel. 
+ bool is_par = Thread::current()->is_GC_task_thread(); if (is_par) _indexedFreeListParLocks[rem_sz]->lock(); - assert(!is_par || - (GenCollectedHeap::heap()->n_par_threads() == - GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch"); returnChunkToFreeList(ffc); split(size, rem_sz); if (is_par) _indexedFreeListParLocks[rem_sz]->unlock(); @@ -1972,8 +1970,6 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() { assert(_promoInfo.tracking(), "No preceding save_marks?"); - assert(GenCollectedHeap::heap()->n_par_threads() == 0, - "Shouldn't be called if using parallel gc."); return _promoInfo.noPromotions(); } @@ -1981,8 +1977,6 @@ \ void CompactibleFreeListSpace:: \ oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ - assert(GenCollectedHeap::heap()->n_par_threads() == 0, \ - "Shouldn't be called (yet) during parallel part of gc."); \ _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ /* \ * This also restores any displaced headers and removes the elements from \ diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp --- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -438,7 +438,8 @@ // Override: provides a DCTO_CL specific to this kind of space. DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary); + HeapWord* boundary, + bool parallel); void blk_iterate(BlkClosure* cl); void blk_iterate_careful(BlkClosureCareful* cl); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp --- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -2428,14 +2428,18 @@ MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. - gch->gen_process_roots(_cmsGen->level(), - true, // younger gens are roots - true, // activate StrongRootsScope - GenCollectedHeap::ScanningOption(roots_scanning_options()), - should_unload_classes(), - ¬Older, - NULL, - NULL); // SSS: Provide correct closure + { + StrongRootsScope srs(1); + + gch->gen_process_roots(&srs, + _cmsGen->level(), + true, // younger gens are roots + GenCollectedHeap::ScanningOption(roots_scanning_options()), + should_unload_classes(), + ¬Older, + NULL, + NULL); + } // Now mark from the roots MarkFromRootsClosure markFromRootsClosure(this, _span, @@ -2496,14 +2500,18 @@ gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
- gch->gen_process_roots(_cmsGen->level(), - true, // younger gens are roots - true, // activate StrongRootsScope - GenCollectedHeap::ScanningOption(roots_scanning_options()), - should_unload_classes(), - ¬Older, - NULL, - &cld_closure); + { + StrongRootsScope srs(1); + + gch->gen_process_roots(&srs, + _cmsGen->level(), + true, // younger gens are roots + GenCollectedHeap::ScanningOption(roots_scanning_options()), + should_unload_classes(), + ¬Older, + NULL, + &cld_closure); + } // Now mark from the roots MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, @@ -2913,10 +2921,11 @@ // Parallel initial mark task class CMSParInitialMarkTask: public CMSParMarkTask { + StrongRootsScope* _strong_roots_scope; public: - CMSParInitialMarkTask(CMSCollector* collector, uint n_workers) : - CMSParMarkTask("Scan roots and young gen for initial mark in parallel", - collector, n_workers) {} + CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) : + CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers), + _strong_roots_scope(strong_roots_scope) {} void work(uint worker_id); }; @@ -3004,24 +3013,26 @@ FlexibleWorkGang* workers = gch->workers(); assert(workers != NULL, "Need parallel worker threads."); uint n_workers = workers->active_workers(); - CMSParInitialMarkTask tsk(this, n_workers); - gch->set_par_threads(n_workers); + + StrongRootsScope srs(n_workers); + + CMSParInitialMarkTask tsk(this, &srs, n_workers); initialize_sequential_subtasks_for_young_gen_rescan(n_workers); if (n_workers > 1) { - StrongRootsScope srs; workers->run_task(&tsk); } else { - StrongRootsScope srs; tsk.work(0); } - gch->set_par_threads(0); } else { // The serial version. CLDToOopClosure cld_closure(¬Older, true); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. - gch->gen_process_roots(_cmsGen->level(), + + StrongRootsScope srs(1); + + gch->gen_process_roots(&srs, + _cmsGen->level(), true, // younger gens are roots - true, // activate StrongRootsScope GenCollectedHeap::ScanningOption(roots_scanning_options()), should_unload_classes(), ¬Older, @@ -4452,9 +4463,9 @@ CLDToOopClosure cld_closure(&par_mri_cl, true); - gch->gen_process_roots(_collector->_cmsGen->level(), + gch->gen_process_roots(_strong_roots_scope, + _collector->_cmsGen->level(), false, // yg was scanned above - false, // this is parallel code GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), _collector->should_unload_classes(), &par_mri_cl, @@ -4478,6 +4489,7 @@ // The per-thread work queues, available here for stealing. 
OopTaskQueueSet* _task_queues; ParallelTaskTerminator _term; + StrongRootsScope* _strong_roots_scope; public: // A value of 0 passed to n_workers will cause the number of @@ -4485,12 +4497,14 @@ CMSParRemarkTask(CMSCollector* collector, CompactibleFreeListSpace* cms_space, uint n_workers, FlexibleWorkGang* workers, - OopTaskQueueSet* task_queues): + OopTaskQueueSet* task_queues, + StrongRootsScope* strong_roots_scope): CMSParMarkTask("Rescan roots and grey objects in parallel", collector, n_workers), _cms_space(cms_space), _task_queues(task_queues), - _term(n_workers, task_queues) { } + _term(n_workers, task_queues), + _strong_roots_scope(strong_roots_scope) { } OopTaskQueueSet* task_queues() { return _task_queues; } @@ -4588,9 +4602,9 @@ // ---------- remaining roots -------------- _timer.reset(); _timer.start(); - gch->gen_process_roots(_collector->_cmsGen->level(), + gch->gen_process_roots(_strong_roots_scope, + _collector->_cmsGen->level(), false, // yg was scanned above - false, // this is parallel code GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), _collector->should_unload_classes(), &par_mrias_cl, @@ -5058,22 +5072,15 @@ FlexibleWorkGang* workers = gch->workers(); assert(workers != NULL, "Need parallel worker threads."); // Choose to use the number of GC workers most recently set - // into "active_workers". If active_workers is not set, set it - // to ParallelGCThreads. + // into "active_workers". uint n_workers = workers->active_workers(); - if (n_workers == 0) { - assert(n_workers > 0, "Should have been set during scavenge"); - n_workers = ParallelGCThreads; - workers->set_active_workers(n_workers); - } + CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); - CMSParRemarkTask tsk(this, - cms_space, - n_workers, workers, task_queues()); - - // Set up for parallel process_roots work. - gch->set_par_threads(n_workers); + StrongRootsScope srs(n_workers); + + CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs); + // We won't be iterating over the cards in the card table updating // the younger_gen cards, so we shouldn't call the following else // the verification code as well as subsequent younger_refs_iterate @@ -5105,15 +5112,12 @@ // necessarily be so, since it's possible that we are doing // ST marking. ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); - StrongRootsScope srs; workers->run_task(&tsk); } else { ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); - StrongRootsScope srs; tsk.work(0); } - gch->set_par_threads(0); // 0 ==> non-parallel. // restore, single-threaded for now, any preserved marks // as a result of work_q overflow restore_preserved_marks_if_any(); @@ -5177,11 +5181,11 @@ verify_work_stacks_empty(); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. - StrongRootsScope srs; - - gch->gen_process_roots(_cmsGen->level(), + StrongRootsScope srs(1); + + gch->gen_process_roots(&srs, + _cmsGen->level(), true, // younger gens as roots - false, // use the local StrongRootsScope GenCollectedHeap::ScanningOption(roots_scanning_options()), should_unload_classes(), &mrias_cl, @@ -5254,18 +5258,14 @@ CMSBitMap* mark_bit_map, AbstractWorkGang* workers, OopTaskQueueSet* task_queues): - // XXX Should superclass AGTWOQ also know about AWG since it knows - // about the task_queues used by the AWG? Then it could initialize - // the terminator() object. See 6984287. The set_for_termination() - // below is a temporary band-aid for the regression in 6984287. 
AbstractGangTaskWOopQueues("Process referents by policy in parallel", - task_queues), + task_queues, + workers->active_workers()), _task(task), _collector(collector), _span(span), _mark_bit_map(mark_bit_map) { assert(_collector->_span.equals(_span) && !_span.is_empty(), "Inconsistency in _span"); - set_for_termination(workers->active_workers()); } OopTaskQueueSet* task_queues() { return queues(); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/parCardTableModRefBS.cpp --- a/hotspot/src/share/vm/gc/cms/parCardTableModRefBS.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/parCardTableModRefBS.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -39,16 +39,11 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, OopsInGenClosure* cl, CardTableRS* ct, - int n_threads) { - assert(n_threads > 0, "Error: expected n_threads > 0"); - assert((n_threads == 1 && ParallelGCThreads == 0) || - n_threads <= (int)ParallelGCThreads, - "# worker threads != # requested!"); - assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread"); - assert(UseDynamicNumberOfGCThreads || - !FLAG_IS_DEFAULT(ParallelGCThreads) || - n_threads == (int)ParallelGCThreads, - "# worker threads != # requested!"); + uint n_threads) { + assert(n_threads > 0, "expected n_threads > 0"); + assert(n_threads <= ParallelGCThreads, + err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads)); + // Make sure the LNC array is valid for the space. jbyte** lowest_non_clean; uintptr_t lowest_non_clean_base_chunk_index; @@ -66,7 +61,8 @@ uint stride = 0; while (!pst->is_task_claimed(/* reference */ stride)) { - process_stride(sp, mr, stride, n_strides, cl, ct, + process_stride(sp, mr, stride, n_strides, + cl, ct, lowest_non_clean, lowest_non_clean_base_chunk_index, lowest_non_clean_chunk_size); @@ -132,9 +128,13 @@ assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)"); assert(used.contains(chunk_mr), "chunk_mr should be subset of used"); + // This function is used by the parallel card table iteration. + const bool parallel = true; + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), - cl->gen_boundary()); - ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); + cl->gen_boundary(), + parallel); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); // Process the chunk. diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/parNewGeneration.cpp --- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -567,23 +567,15 @@ } ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen, - HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) : + HeapWord* young_old_boundary, ParScanThreadStateSet* state_set, + StrongRootsScope* strong_roots_scope) : AbstractGangTask("ParNewGeneration collection"), _gen(gen), _old_gen(old_gen), _young_old_boundary(young_old_boundary), - _state_set(state_set) + _state_set(state_set), + _strong_roots_scope(strong_roots_scope) {} -// Reset the terminator for the given number of -// active threads. -void ParNewGenTask::set_for_termination(uint active_workers) { - _state_set->reset(active_workers, _gen->promotion_failed()); - // Should the heap be passed in? There's only 1 for now so - // grab it instead. 
- GenCollectedHeap* gch = GenCollectedHeap::heap(); - gch->set_n_termination(active_workers); -} - void ParNewGenTask::work(uint worker_id) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Since this is being done in a separate thread, need new resource @@ -603,10 +595,10 @@ false); par_scan_state.start_strong_roots(); - gch->gen_process_roots(_gen->level(), + gch->gen_process_roots(_strong_roots_scope, + _gen->level(), true, // Process younger gens, if any, // as strong roots. - false, // no scope; this is parallel code GenCollectedHeap::SO_ScavengeCodeCache, GenCollectedHeap::StrongAndWeakRoots, &par_scan_state.to_space_root_closure(), @@ -759,9 +751,6 @@ private: virtual void work(uint worker_id); - virtual void set_for_termination(uint active_workers) { - _state_set.terminator()->reset_for_reuse(active_workers); - } private: ParNewGeneration& _gen; ProcessTask& _task; @@ -838,7 +827,6 @@ { _state_set.flush(); GenCollectedHeap* gch = GenCollectedHeap::heap(); - gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); } @@ -939,33 +927,35 @@ to()->clear(SpaceDecorator::Mangle); gch->save_marks(); - assert(workers != NULL, "Need parallel worker threads."); - uint n_workers = active_workers; // Set the correct parallelism (number of queues) in the reference processor - ref_processor()->set_active_mt_degree(n_workers); + ref_processor()->set_active_mt_degree(active_workers); // Always set the terminator for the active number of workers // because only those workers go through the termination protocol. - ParallelTaskTerminator _term(n_workers, task_queues()); - ParScanThreadStateSet thread_state_set(workers->active_workers(), + ParallelTaskTerminator _term(active_workers, task_queues()); + ParScanThreadStateSet thread_state_set(active_workers, *to(), *this, *_old_gen, *task_queues(), _overflow_stacks, desired_plab_sz(), _term); - ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set); - gch->set_par_threads(n_workers); - gch->rem_set()->prepare_for_younger_refs_iterate(true); - // It turns out that even when we're using 1 thread, doing the work in a - // separate thread causes wide variance in run times. We can't help this - // in the multi-threaded case, but we special-case n=1 here to get - // repeatable measurements of the 1-thread overhead of the parallel code. - if (n_workers > 1) { - StrongRootsScope srs; - workers->run_task(&tsk); - } else { - StrongRootsScope srs; - tsk.work(0); + thread_state_set.reset(active_workers, promotion_failed()); + + { + StrongRootsScope srs(active_workers); + + ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs); + gch->rem_set()->prepare_for_younger_refs_iterate(true); + // It turns out that even when we're using 1 thread, doing the work in a + // separate thread causes wide variance in run times. We can't help this + // in the multi-threaded case, but we special-case n=1 here to get + // repeatable measurements of the 1-thread overhead of the parallel code. + if (active_workers > 1) { + workers->run_task(&tsk); + } else { + tsk.work(0); + } } + thread_state_set.reset(0 /* Bad value in debug if not reset */, promotion_failed()); @@ -995,7 +985,6 @@ _gc_timer, _gc_tracer.gc_id()); } else { thread_state_set.flush(); - gch->set_par_threads(0); // 0 ==> non-parallel. 
gch->save_marks(); stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, NULL, @@ -1033,7 +1022,7 @@ to()->set_concurrent_iteration_safe_limit(to()->top()); if (ResizePLAB) { - plab_stats()->adjust_desired_plab_sz(n_workers); + plab_stats()->adjust_desired_plab_sz(active_workers); } if (PrintGC && !PrintGCDetails) { @@ -1477,9 +1466,9 @@ _ref_processor = new ReferenceProcessor(_reserved, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - (int) ParallelGCThreads, // mt processing degree + (uint) ParallelGCThreads, // mt processing degree refs_discovery_is_mt(), // mt discovery - (int) ParallelGCThreads, // mt discovery degree + (uint) ParallelGCThreads, // mt discovery degree refs_discovery_is_atomic(), // atomic_discovery NULL); // is_alive_non_header } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/parNewGeneration.hpp --- a/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -39,6 +39,7 @@ class ParRootScanWithoutBarrierClosure; class ParRootScanWithBarrierTwoGensClosure; class ParEvacuateFollowersClosure; +class StrongRootsScope; // It would be better if these types could be kept local to the .cpp file, // but they must be here to allow ParScanClosure::do_oop_work to be defined @@ -237,20 +238,18 @@ Generation* _old_gen; HeapWord* _young_old_boundary; class ParScanThreadStateSet* _state_set; + StrongRootsScope* _strong_roots_scope; public: ParNewGenTask(ParNewGeneration* gen, Generation* old_gen, HeapWord* young_old_boundary, - ParScanThreadStateSet* state_set); + ParScanThreadStateSet* state_set, + StrongRootsScope* strong_roots_scope); HeapWord* young_old_boundary() { return _young_old_boundary; } void work(uint worker_id); - - // Reset the terminator in ParScanThreadStateSet for - // "active_workers" threads. - virtual void set_for_termination(uint active_workers); }; class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure { diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/parOopClosures.hpp --- a/hotspot/src/share/vm/gc/cms/parOopClosures.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/parOopClosures.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_CMS_PAROOPCLOSURES_HPP #include "gc/shared/genOopClosures.hpp" +#include "gc/shared/taskqueue.hpp" #include "memory/padded.hpp" // Closures for ParNewGeneration diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/cms/yieldingWorkgroup.hpp --- a/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -147,6 +147,13 @@ bool completed() const { return _status == COMPLETED; } bool aborted() const { return _status == ABORTED; } bool active() const { return _status == ACTIVE; } + + // This method configures the task for proper termination. + // Some tasks do not have any requirements on termination + // and may inherit this method that does nothing. Some + // tasks do some coordination on termination and override + // this method to implement that coordination. + virtual void set_for_termination(uint active_workers) {} }; // Class YieldingWorkGang: A subclass of WorkGang. 
// In particular, a YieldingWorkGang is made up of diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp --- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -158,20 +158,10 @@ hr->calc_gc_efficiency(); } -void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions, +void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads, + uint n_regions, uint chunk_size) { _first_par_unreserved_idx = 0; - uint n_threads = (uint) ParallelGCThreads; - if (UseDynamicNumberOfGCThreads) { - assert(G1CollectedHeap::heap()->workers()->active_workers() > 0, - "Should have been set earlier"); - // This is defensive code. As the assertion above says, the number - // of active threads should be > 0, but in case there is some path - // or some improperly initialized variable with leads to no - // active threads, protect against that in a product build. - n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(), - 1U); - } uint max_waste = n_threads * chunk_size; // it should be aligned with respect to chunk_size uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size; diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp --- a/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -121,7 +121,7 @@ // Must be called before calls to claim_array_chunk(). // n_regions is the number of regions, chunk_size the chunk size. - void prepare_for_par_region_addition(uint n_regions, uint chunk_size); + void prepare_for_par_region_addition(uint n_threads, uint n_regions, uint chunk_size); // Returns the first index in a contiguous chunk of chunk_size indexes // that the calling thread has reserved. These must be set by the // calling thread using set_region() (to NULL if necessary). 
diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp --- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -35,7 +35,7 @@ { // Ergonomically select initial concurrent refinement parameters if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) { - FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2(ParallelGCThreads, 1)); + FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads); } set_green_zone(G1ConcRefinementGreenZone); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/concurrentMark.cpp --- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -518,7 +518,7 @@ _markStack(this), // _finger set in set_non_marking_state - _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)), + _max_worker_id((uint)ParallelGCThreads), // _active_tasks set in set_non_marking_state // _tasks set inside the constructor _task_queues(new CMTaskQueueSet((int) _max_worker_id)), @@ -1218,15 +1218,13 @@ "Maximum number of marking threads exceeded"); uint active_workers = MAX2(1U, parallel_marking_threads()); + assert(active_workers > 0, "Should have been set"); // Parallel task terminator is set in "set_concurrency_and_phase()" set_concurrency_and_phase(active_workers, true /* concurrent */); CMConcurrentMarkingTask markingTask(this, cmThread()); _parallel_workers->set_active_workers(active_workers); - // Don't set _n_par_threads because it affects MT in process_roots() - // and the decisions on that MT processing is made elsewhere. - assert(_parallel_workers->active_workers() > 0, "Should have been set"); _parallel_workers->run_task(&markingTask); print_stats(); } @@ -1761,28 +1759,20 @@ } }; -class G1ParNoteEndTask; - class G1NoteEndOfConcMarkClosure : public HeapRegionClosure { G1CollectedHeap* _g1; - size_t _max_live_bytes; - uint _regions_claimed; size_t _freed_bytes; FreeRegionList* _local_cleanup_list; HeapRegionSetCount _old_regions_removed; HeapRegionSetCount _humongous_regions_removed; HRRSCleanupTask* _hrrs_cleanup_task; - double _claimed_region_time; - double _max_region_time; public: G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, FreeRegionList* local_cleanup_list, HRRSCleanupTask* hrrs_cleanup_task) : _g1(g1), - _max_live_bytes(0), _regions_claimed(0), _freed_bytes(0), - _claimed_region_time(0.0), _max_region_time(0.0), _local_cleanup_list(local_cleanup_list), _old_regions_removed(), _humongous_regions_removed(), @@ -1799,10 +1789,7 @@ // We use a claim value of zero here because all regions // were claimed with value 1 in the FinalCount task. 
_g1->reset_gc_time_stamps(hr); - double start = os::elapsedTime(); - _regions_claimed++; hr->note_end_of_marking(); - _max_live_bytes += hr->max_live_bytes(); if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) { _freed_bytes += hr->used(); @@ -1819,18 +1806,8 @@ hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task); } - double region_time = (os::elapsedTime() - start); - _claimed_region_time += region_time; - if (region_time > _max_region_time) { - _max_region_time = region_time; - } return false; } - - size_t max_live_bytes() { return _max_live_bytes; } - uint regions_claimed() { return _regions_claimed; } - double claimed_region_time_sec() { return _claimed_region_time; } - double max_region_time_sec() { return _max_region_time; } }; class G1ParNoteEndTask: public AbstractGangTask { @@ -1838,14 +1815,12 @@ protected: G1CollectedHeap* _g1h; - size_t _max_live_bytes; - size_t _freed_bytes; FreeRegionList* _cleanup_list; HeapRegionClaimer _hrclaimer; public: G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) : - AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { + AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) { } void work(uint worker_id) { @@ -1861,8 +1836,6 @@ { MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); _g1h->decrement_summary_bytes(g1_note_end.freed_bytes()); - _max_live_bytes += g1_note_end.max_live_bytes(); - _freed_bytes += g1_note_end.freed_bytes(); // If we iterate over the global cleanup list at the end of // cleanup to do this printing we will not guarantee to only @@ -1887,8 +1860,6 @@ HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task); } } - size_t max_live_bytes() { return _max_live_bytes; } - size_t freed_bytes() { return _freed_bytes; } }; class G1ParScrubRemSetTask: public AbstractGangTask { @@ -1938,18 +1909,10 @@ HeapRegionRemSet::reset_for_cleanup_tasks(); - uint n_workers; - // Do counting once more with the world stopped for good measure. G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm); - g1h->set_par_threads(); - n_workers = g1h->n_par_threads(); - assert(g1h->n_par_threads() == n_workers, - "Should not have been reset"); g1h->workers()->run_task(&g1_par_count_task); - // Done with the parallel phase so reset to 0. - g1h->set_par_threads(0); if (VerifyDuringGC) { // Verify that the counting data accumulated during marking matches @@ -1965,10 +1928,7 @@ &expected_region_bm, &expected_card_bm); - g1h->set_par_threads((int)n_workers); g1h->workers()->run_task(&g1_par_verify_task); - // Done with the parallel phase so reset to 0. - g1h->set_par_threads(0); guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures"); } @@ -1990,11 +1950,11 @@ g1h->reset_gc_time_stamp(); + uint n_workers = _g1h->workers()->active_workers(); + // Note end of marking in all heap regions. 
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers); - g1h->set_par_threads((int)n_workers); g1h->workers()->run_task(&g1_par_note_end_task); - g1h->set_par_threads(0); g1h->check_gc_time_stamps(); if (!cleanup_list_is_empty()) { @@ -2009,9 +1969,7 @@ if (G1ScrubRemSets) { double rs_scrub_start = os::elapsedTime(); G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers); - g1h->set_par_threads((int)n_workers); g1h->workers()->run_task(&g1_par_scrub_rs_task); - g1h->set_par_threads(0); double rs_scrub_end = os::elapsedTime(); double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start); @@ -2020,7 +1978,7 @@ // this will also free any regions totally full of garbage objects, // and sort the regions. - g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers); + g1h->g1_policy()->record_concurrent_mark_cleanup_end(); // Statistics. double end = os::elapsedTime(); @@ -2312,9 +2270,7 @@ // and overflow handling in CMTask::do_marking_step() knows // how many workers to wait for. _cm->set_concurrency(_active_workers); - _g1h->set_par_threads(_active_workers); _workers->run_task(&proc_task_proxy); - _g1h->set_par_threads(0); } class G1CMRefEnqueueTaskProxy: public AbstractGangTask { @@ -2344,9 +2300,7 @@ // and overflow handling in CMTask::do_marking_step() knows // how many workers to wait for. _cm->set_concurrency(_active_workers); - _g1h->set_par_threads(_active_workers); _workers->run_task(&enq_task_proxy); - _g1h->set_par_threads(0); } void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) { @@ -2608,27 +2562,23 @@ g1h->ensure_parsability(false); - StrongRootsScope srs; // this is remark, so we'll use up all active threads uint active_workers = g1h->workers()->active_workers(); - if (active_workers == 0) { - assert(active_workers > 0, "Should have been set earlier"); - active_workers = (uint) ParallelGCThreads; - g1h->workers()->set_active_workers(active_workers); - } set_concurrency_and_phase(active_workers, false /* concurrent */); // Leave _parallel_marking_threads at it's // value originally calculated in the ConcurrentMark // constructor and pass values of the active workers // through the gang in the task. - CMRemarkTask remarkTask(this, active_workers); - // We will start all available threads, even if we decide that the - // active_workers will be fewer. The extra ones will just bail out - // immediately. - g1h->set_par_threads(active_workers); - g1h->workers()->run_task(&remarkTask); - g1h->set_par_threads(0); + { + StrongRootsScope srs(active_workers); + + CMRemarkTask remarkTask(this, active_workers); + // We will start all available threads, even if we decide that the + // active_workers will be fewer. The extra ones will just bail out + // immediately. 
+ g1h->workers()->run_task(&remarkTask); + } SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); guarantee(has_overflown() || @@ -3001,9 +2951,7 @@ G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, _max_worker_id, n_workers); - _g1h->set_par_threads(n_workers); _g1h->workers()->run_task(&g1_par_agg_task); - _g1h->set_par_threads(0); } // Clear the per-worker arrays used to store the per-region counting data diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp --- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -1326,27 +1326,10 @@ AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), workers()->active_workers(), Threads::number_of_non_daemon_threads()); - assert(UseDynamicNumberOfGCThreads || - n_workers == workers()->total_workers(), - "If not dynamic should be using all the workers"); workers()->set_active_workers(n_workers); - // Set parallel threads in the heap (_n_par_threads) only - // before a parallel phase and always reset it to 0 after - // the phase so that the number of parallel threads does - // no get carried forward to a serial phase where there - // may be code that is "possibly_parallel". - set_par_threads(n_workers); ParRebuildRSTask rebuild_rs_task(this); - assert(UseDynamicNumberOfGCThreads || - workers()->active_workers() == workers()->total_workers(), - "Unless dynamic should use total workers"); - // Use the most recent number of active workers - assert(workers()->active_workers() > 0, - "Active workers not properly set"); - set_par_threads(workers()->active_workers()); workers()->run_task(&rebuild_rs_task); - set_par_threads(0); // Rebuild the strong code root lists for each region rebuild_strong_code_roots(); @@ -1769,7 +1752,7 @@ _allocator = G1Allocator::create_allocator(this); _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2; - int n_queues = MAX2((int)ParallelGCThreads, 1); + int n_queues = (int)ParallelGCThreads; _task_queues = new RefToScanQueueSet(n_queues); uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); @@ -2081,11 +2064,11 @@ new ReferenceProcessor(mr, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - (int) ParallelGCThreads, + (uint) ParallelGCThreads, // degree of mt processing (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery - (int) MAX2(ParallelGCThreads, ConcGCThreads), + (uint) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery false, // Reference discovery is not atomic @@ -2098,11 +2081,11 @@ new ReferenceProcessor(mr, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - MAX2((int)ParallelGCThreads, 1), + (uint) ParallelGCThreads, // degree of mt processing (ParallelGCThreads > 1), // mt discovery - MAX2((int)ParallelGCThreads, 1), + (uint) ParallelGCThreads, // degree of mt discovery true, // Reference discovery is atomic @@ -2502,8 +2485,7 @@ assert(_worker_cset_start_region != NULL, "sanity"); assert(_worker_cset_start_region_time_stamp != NULL, "sanity"); - int n_queues = MAX2((int)ParallelGCThreads, 1); - for (int i = 0; i < n_queues; i++) { + for (uint i = 0; i < ParallelGCThreads; i++) { _worker_cset_start_region[i] = NULL; _worker_cset_start_region_time_stamp[i] = 0; } @@ -2541,9 +2523,6 @@ result = g1_policy()->collection_set(); uint cs_size = g1_policy()->cset_region_length(); uint active_workers = workers()->active_workers(); - 
assert(UseDynamicNumberOfGCThreads || - active_workers == workers()->total_workers(), - "Unless dynamic should use total workers"); uint end_ind = (cs_size * worker_i) / active_workers; uint start_ind = 0; @@ -3021,7 +3000,7 @@ G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); { - G1RootProcessor root_processor(this); + G1RootProcessor root_processor(this, 1); root_processor.process_all_roots(&rootsCl, &cldCl, &blobsCl); @@ -3042,13 +3021,7 @@ if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { G1ParVerifyTask task(this, vo); - assert(UseDynamicNumberOfGCThreads || - workers()->active_workers() == workers()->total_workers(), - "If not dynamic should be using all the workers"); - uint n_workers = workers()->active_workers(); - set_par_threads(n_workers); workers()->run_task(&task); - set_par_threads(0); if (task.failures()) { failures = true; } @@ -3572,6 +3545,10 @@ }; #endif // ASSERT +uint G1CollectedHeap::num_task_queues() const { + return _task_queues->size(); +} + #if TASKQUEUE_STATS void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) { st->print_raw_cr("GC Task Stats"); @@ -3583,7 +3560,7 @@ print_taskqueue_stats_hdr(st); TaskQueueStats totals; - const uint n = workers()->total_workers(); + const uint n = num_task_queues(); for (uint i = 0; i < n; ++i) { st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr(); totals += task_queue(i)->stats; @@ -3594,7 +3571,7 @@ } void G1CollectedHeap::reset_taskqueue_stats() { - const uint n = workers()->total_workers(); + const uint n = num_task_queues(); for (uint i = 0; i < n; ++i) { task_queue(i)->stats.reset(); } @@ -3696,9 +3673,6 @@ uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), workers()->active_workers(), Threads::number_of_non_daemon_threads()); - assert(UseDynamicNumberOfGCThreads || - active_workers == workers()->total_workers(), - "If not dynamic should be using all the workers"); workers()->set_active_workers(active_workers); double pause_start_sec = os::elapsedTime(); @@ -3873,8 +3847,7 @@ if (evacuation_failed()) { _allocator->set_used(recalculate_used()); - uint n_queues = MAX2((int)ParallelGCThreads, 1); - for (uint i = 0; i < n_queues; i++) { + for (uint i = 0; i < ParallelGCThreads; i++) { if (_evacuation_failed_info_array[i].has_failed()) { _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); } @@ -4041,10 +4014,8 @@ void G1CollectedHeap::remove_self_forwarding_pointers() { double remove_self_forwards_start = os::elapsedTime(); - set_par_threads(); G1ParRemoveSelfForwardPtrsTask rsfp_task(this); workers()->run_task(&rsfp_task); - set_par_threads(0); // Now restore saved marks, if any. 
assert(_objs_with_preserved_marks.size() == @@ -4308,12 +4279,13 @@ Mutex* stats_lock() { return &_stats_lock; } public: - G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor) + G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers) : AbstractGangTask("G1 collection"), _g1h(g1h), _queues(task_queues), _root_processor(root_processor), - _terminator(0, _queues), + _terminator(n_workers, _queues), + _n_workers(n_workers), _stats_lock(Mutex::leaf, "parallel G1 stats lock", true) {} @@ -4325,12 +4297,6 @@ ParallelTaskTerminator* terminator() { return &_terminator; } - virtual void set_for_termination(uint active_workers) { - _root_processor->set_num_workers(active_workers); - terminator()->reset_for_reuse(active_workers); - _n_workers = active_workers; - } - // Helps out with CLD processing. // // During InitialMark we need to: @@ -4811,19 +4777,14 @@ G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, n_workers, class_unloading_occurred); - set_par_threads(n_workers); workers()->run_task(&g1_unlink_task); - set_par_threads(0); } void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) { { - uint n_workers = workers()->active_workers(); G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols); - set_par_threads(n_workers); workers()->run_task(&g1_unlink_task); - set_par_threads(0); } if (G1StringDedup::is_enabled()) { @@ -4851,13 +4812,9 @@ void G1CollectedHeap::redirty_logged_cards() { double redirty_logged_cards_start = os::elapsedTime(); - uint n_workers = workers()->active_workers(); - G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set()); dirty_card_queue_set().reset_for_par_iteration(); - set_par_threads(n_workers); workers()->run_task(&redirty_task); - set_par_threads(0); DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set(); dcq.merge_bufferlists(&dirty_card_queue_set()); @@ -5093,9 +5050,7 @@ ParallelTaskTerminator terminator(_active_workers, _queues); G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator); - _g1h->set_par_threads(_active_workers); _workers->run_task(&proc_task_proxy); - _g1h->set_par_threads(0); } // Gang task for parallel reference enqueueing. @@ -5124,9 +5079,7 @@ G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task); - _g1h->set_par_threads(_active_workers); _workers->run_task(&enq_task_proxy); - _g1h->set_par_threads(0); } // End of weak reference support closures @@ -5219,7 +5172,7 @@ }; // Weak Reference processing during an evacuation pause (part 1). -void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) { +void G1CollectedHeap::process_discovered_references() { double ref_proc_start = os::elapsedTime(); ReferenceProcessor* rp = _ref_processor_stw; @@ -5246,17 +5199,14 @@ // referents points to another object which is also referenced by an // object discovered by the STW ref processor. - assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers"); - - set_par_threads(no_of_gc_workers); + uint no_of_gc_workers = workers()->active_workers(); + G1ParPreserveCMReferentsTask keep_cm_referents(this, no_of_gc_workers, _task_queues); workers()->run_task(&keep_cm_referents); - set_par_threads(0); - // Closure to test whether a referent is alive. 
G1STWIsAliveClosure is_alive(this); @@ -5330,7 +5280,7 @@ } // Weak Reference processing during an evacuation pause (part 2). -void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) { +void G1CollectedHeap::enqueue_discovered_references() { double ref_enq_start = os::elapsedTime(); ReferenceProcessor* rp = _ref_processor_stw; @@ -5344,12 +5294,12 @@ } else { // Parallel reference enqueueing - assert(no_of_gc_workers == workers()->active_workers(), - "Need to reset active workers"); - assert(rp->num_q() == no_of_gc_workers, "sanity"); - assert(no_of_gc_workers <= rp->max_num_q(), "sanity"); - - G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers); + uint n_workers = workers()->active_workers(); + + assert(rp->num_q() == n_workers, "sanity"); + assert(n_workers <= rp->max_num_q(), "sanity"); + + G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, n_workers); rp->enqueue_discovered_references(&par_task_executor); } @@ -5380,11 +5330,6 @@ hot_card_cache->set_use_cache(false); const uint n_workers = workers()->active_workers(); - assert(UseDynamicNumberOfGCThreads || - n_workers == workers()->total_workers(), - "If not dynamic should be using all the workers"); - set_par_threads(n_workers); - init_for_evac_failure(NULL); @@ -5393,19 +5338,16 @@ double end_par_time_sec; { - G1RootProcessor root_processor(this); - G1ParTask g1_par_task(this, _task_queues, &root_processor); + G1RootProcessor root_processor(this, n_workers); + G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers); // InitialMark needs claim bits to keep track of the marked-through CLDs. if (g1_policy()->during_initial_mark_pause()) { ClassLoaderDataGraph::clear_claimed_marks(); } - // The individual threads will set their evac-failure closures. - if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr(); - // These tasks use ShareHeap::_process_strong_tasks - assert(UseDynamicNumberOfGCThreads || - workers()->active_workers() == workers()->total_workers(), - "If not dynamic should be using all the workers"); + // The individual threads will set their evac-failure closures. + if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr(); + workers()->run_task(&g1_par_task); end_par_time_sec = os::elapsedTime(); @@ -5425,14 +5367,12 @@ (os::elapsedTime() - end_par_time_sec) * 1000.0; phase_times->record_code_root_fixup_time(code_root_fixup_time_ms); - set_par_threads(0); - // Process any discovered reference objects - we have // to do this _before_ we retire the GC alloc regions // as we may have to copy some 'reachable' referent // objects (and their reachable sub-graphs) that were // not copied during the pause. - process_discovered_references(n_workers); + process_discovered_references(); if (G1StringDedup::is_enabled()) { double fixup_start = os::elapsedTime(); @@ -5474,7 +5414,7 @@ // will log these updates (and dirty their associated // cards). We need these updates logged to update any // RSets. - enqueue_discovered_references(n_workers); + enqueue_discovered_references(); redirty_logged_cards(); COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); @@ -5779,9 +5719,7 @@ // Iterate over the dirty cards region list. 
G1ParCleanupCTTask cleanup_task(ct_bs, this); - set_par_threads(); workers()->run_task(&cleanup_task); - set_par_threads(0); #ifndef PRODUCT if (G1VerifyCTCleanup || VerifyAfterGC) { G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs); @@ -6314,21 +6252,6 @@ g1mm()->update_eden_size(); } -void G1CollectedHeap::set_par_threads() { - // Don't change the number of workers. Use the value previously set - // in the workgroup. - uint n_workers = workers()->active_workers(); - assert(UseDynamicNumberOfGCThreads || - n_workers == workers()->total_workers(), - "Otherwise should be using the total number of workers"); - if (n_workers == 0) { - assert(false, "Should have been set in prior evacuation pause."); - n_workers = ParallelGCThreads; - workers()->set_active_workers(n_workers); - } - set_par_threads(n_workers); -} - // Methods for the GC alloc regions HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp --- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -606,11 +606,11 @@ // Process any reference objects discovered during // an incremental evacuation pause. - void process_discovered_references(uint no_of_gc_workers); + void process_discovered_references(); // Enqueue any remaining discovered references // after processing. - void enqueue_discovered_references(uint no_of_gc_workers); + void enqueue_discovered_references(); public: FlexibleWorkGang* workers() const { return _workers; } @@ -981,6 +981,8 @@ RefToScanQueue *task_queue(uint i) const; + uint num_task_queues() const; + // A set of cards where updates happened during the GC DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } @@ -1012,11 +1014,6 @@ // Initialize weak reference processing. void ref_processing_init(); - // Explicitly import set_par_threads into this scope - using CollectedHeap::set_par_threads; - // Set _n_par_threads according to a policy TBD. 
- void set_par_threads(); - virtual Name kind() const { return CollectedHeap::G1CollectedHeap; } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp --- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -1587,14 +1587,17 @@ } void -G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) { +G1CollectorPolicy::record_concurrent_mark_cleanup_end() { _collectionSetChooser->clear(); + FlexibleWorkGang* workers = _g1->workers(); + uint n_workers = workers->active_workers(); + uint n_regions = _g1->num_regions(); uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); - _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size); + _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers); - _g1->workers()->run_task(&par_known_garbage_task); + workers->run_task(&par_known_garbage_task); _collectionSetChooser->sort_regions(); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp --- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -692,7 +692,7 @@ // Record start, end, and completion of cleanup. void record_concurrent_mark_cleanup_start(); - void record_concurrent_mark_cleanup_end(uint n_workers); + void record_concurrent_mark_cleanup_end(); void record_concurrent_mark_cleanup_completed(); // Records the information about the heap size for reporting in diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp --- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -127,7 +127,7 @@ MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations); { - G1RootProcessor root_processor(g1h); + G1RootProcessor root_processor(g1h, 1); root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure, &GenMarkSweep::follow_cld_closure, &follow_code_closure); @@ -237,7 +237,7 @@ CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations); { - G1RootProcessor root_processor(g1h); + G1RootProcessor root_processor(g1h, 1); root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure, &GenMarkSweep::adjust_cld_closure, &adjust_code_closure); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1OopClosures.cpp --- a/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -50,8 +50,8 @@ _par_scan_state = par_scan_state; _worker_id = par_scan_state->queue_num(); - assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u), - err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u))); + assert(_worker_id < ParallelGCThreads, + err_msg("The given worker id %u must be less than the number of threads " UINTX_FORMAT, _worker_id, ParallelGCThreads)); } // Generate G1 specialized oop_oop_iterate functions. 
diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp --- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -90,11 +90,10 @@ void G1RootProcessor::worker_has_discovered_all_strong_classes() { - uint n_workers = _g1h->n_par_threads(); assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading"); uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes); - if (new_value == n_workers) { + if (new_value == n_workers()) { // This thread is last. Notify the others. MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag); _lock.notify_all(); @@ -102,21 +101,20 @@ } void G1RootProcessor::wait_until_all_strong_classes_discovered() { - uint n_workers = _g1h->n_par_threads(); assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading"); - if ((uint)_n_workers_discovered_strong_classes != n_workers) { + if ((uint)_n_workers_discovered_strong_classes != n_workers()) { MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag); - while ((uint)_n_workers_discovered_strong_classes != n_workers) { + while ((uint)_n_workers_discovered_strong_classes != n_workers()) { _lock.wait(Mutex::_no_safepoint_check_flag, 0, false); } } } -G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) : +G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) : _g1h(g1h), _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)), - _srs(), + _srs(n_workers), _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never), _n_workers_discovered_strong_classes(0) {} @@ -206,7 +204,7 @@ } } - _process_strong_tasks->all_tasks_completed(); + _process_strong_tasks->all_tasks_completed(n_workers()); } void G1RootProcessor::process_strong_roots(OopClosure* oops, @@ -216,7 +214,7 @@ process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0); process_vm_roots(oops, NULL, NULL, 0); - _process_strong_tasks->all_tasks_completed(); + _process_strong_tasks->all_tasks_completed(n_workers()); } void G1RootProcessor::process_all_roots(OopClosure* oops, @@ -230,7 +228,7 @@ CodeCache::blobs_do(blobs); } - _process_strong_tasks->all_tasks_completed(); + _process_strong_tasks->all_tasks_completed(n_workers()); } void G1RootProcessor::process_java_roots(OopClosure* strong_roots, @@ -253,7 +251,7 @@ { G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i); - bool is_par = _g1h->n_par_threads() > 0; + bool is_par = n_workers() > 1; Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code); } } @@ -329,6 +327,6 @@ _g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i); } -void G1RootProcessor::set_num_workers(uint active_workers) { - _process_strong_tasks->set_n_threads(active_workers); +uint G1RootProcessor::n_workers() const { + return _srs.n_threads(); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp --- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -85,7 +85,7 @@ uint worker_i); public: - G1RootProcessor(G1CollectedHeap* g1h); + G1RootProcessor(G1CollectedHeap* g1h, uint n_workers); // Apply closures to the strongly and weakly reachable roots in the system // in a single pass. 
@@ -114,8 +114,8 @@ OopClosure* scan_non_heap_weak_roots, uint worker_i); - // Inform the root processor about the number of worker threads - void set_num_workers(uint active_workers); + // Number of worker threads used by the root processor. + uint n_workers() const; }; #endif // SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1StringDedup.cpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedup.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -153,9 +153,7 @@ G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times); G1CollectedHeap* g1h = G1CollectedHeap::heap(); - g1h->set_par_threads(); g1h->workers()->run_task(&task); - g1h->set_par_threads(0); } void G1StringDedup::threads_do(ThreadClosure* tc) { diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -42,7 +42,7 @@ _cancel(false), _empty(true), _dropped(0) { - _nqueues = MAX2(ParallelGCThreads, (size_t)1); + _nqueues = ParallelGCThreads; _queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC); for (size_t i = 0; i < _nqueues; i++) { new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp --- a/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -112,7 +112,7 @@ }; G1StringDedupEntryCache::G1StringDedupEntryCache() { - _nlists = MAX2(ParallelGCThreads, (size_t)1); + _nlists = ParallelGCThreads; _lists = PaddedArray::create_unfreeable((uint)_nlists); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp --- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -832,9 +832,9 @@ _ref_processor = new ReferenceProcessor(mr, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - (int) ParallelGCThreads, // mt processing degree + (uint) ParallelGCThreads, // mt processing degree true, // mt discovery - (int) ParallelGCThreads, // mt discovery degree + (uint) ParallelGCThreads, // mt discovery degree true, // atomic_discovery &_is_alive_closure); // non-header is alive closure _counters = new CollectorCounters("PSParallelCompact", 1); @@ -2029,7 +2029,6 @@ // Set the number of GC threads to be used in this collection gc_task_manager()->set_active_gang(); gc_task_manager()->task_idle_workers(); - heap->set_par_threads(gc_task_manager()->active_workers()); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/parallel/psScavenge.cpp --- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -382,7 +382,6 @@ // Get the active number of workers here and use that value // throughout the methods. 
uint active_workers = gc_task_manager()->active_workers(); - heap->set_par_threads(active_workers); PSPromotionManager::pre_scavenge(); @@ -846,9 +845,9 @@ _ref_processor = new ReferenceProcessor(mr, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing - (int) ParallelGCThreads, // mt processing degree + (uint) ParallelGCThreads, // mt processing degree true, // mt discovery - (int) ParallelGCThreads, // mt discovery degree + (uint) ParallelGCThreads, // mt discovery degree true, // atomic_discovery NULL); // header provides liveness info diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/serial/defNewGeneration.cpp --- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -38,6 +38,7 @@ #include "gc/shared/referencePolicy.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/strongRootsScope.hpp" #include "memory/iterator.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/oop.inline.hpp" @@ -454,7 +455,7 @@ } } -void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) { +void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) { assert(false, "NYI -- are you sure you want to call this?"); } @@ -625,15 +626,22 @@ assert(gch->no_allocs_since_save_marks(0), "save marks have not been newly set."); - gch->gen_process_roots(_level, - true, // Process younger gens, if any, - // as strong roots. - true, // activate StrongRootsScope - GenCollectedHeap::SO_ScavengeCodeCache, - GenCollectedHeap::StrongAndWeakRoots, - &fsc_with_no_gc_barrier, - &fsc_with_gc_barrier, - &cld_scan_closure); + { + // DefNew needs to run with n_threads == 0, to make sure the serial + // version of the card table scanning code is used. + // See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel. + StrongRootsScope srs(0); + + gch->gen_process_roots(&srs, + _level, + true, // Process younger gens, if any, + // as strong roots. + GenCollectedHeap::SO_ScavengeCodeCache, + GenCollectedHeap::StrongAndWeakRoots, + &fsc_with_no_gc_barrier, + &fsc_with_gc_barrier, + &cld_scan_closure); + } // "evacuate followers". evacuate_followers.do_void(); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/serial/defNewGeneration.hpp --- a/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -255,7 +255,7 @@ // Iteration void object_iterate(ObjectClosure* blk); - void younger_refs_iterate(OopsInGenClosure* cl); + void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads); void space_iterate(SpaceClosure* blk, bool usedOnly = false); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/serial/genMarkSweep.cpp --- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -40,6 +40,7 @@ #include "gc/shared/modRefBarrierSet.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/space.hpp" +#include "gc/shared/strongRootsScope.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" @@ -200,14 +201,18 @@ // Need new claim bits before marking starts. ClassLoaderDataGraph::clear_claimed_marks(); - gch->gen_process_roots(level, - false, // Younger gens are not roots. 
- true, // activate StrongRootsScope - GenCollectedHeap::SO_None, - GenCollectedHeap::StrongRootsOnly, - &follow_root_closure, - &follow_root_closure, - &follow_cld_closure); + { + StrongRootsScope srs(1); + + gch->gen_process_roots(&srs, + level, + false, // Younger gens are not roots. + GenCollectedHeap::SO_None, + GenCollectedHeap::StrongRootsOnly, + &follow_root_closure, + &follow_root_closure, + &follow_cld_closure); + } // Process reference objects found during marking { @@ -284,14 +289,18 @@ assert(level == 1, "We don't use mark-sweep on young generations."); adjust_pointer_closure.set_orig_generation(gch->old_gen()); - gch->gen_process_roots(level, - false, // Younger gens are not roots. - true, // activate StrongRootsScope - GenCollectedHeap::SO_AllCodeCache, - GenCollectedHeap::StrongAndWeakRoots, - &adjust_pointer_closure, - &adjust_pointer_closure, - &adjust_cld_closure); + { + StrongRootsScope srs(1); + + gch->gen_process_roots(&srs, + level, + false, // Younger gens are not roots. + GenCollectedHeap::SO_AllCodeCache, + GenCollectedHeap::StrongAndWeakRoots, + &adjust_pointer_closure, + &adjust_pointer_closure, + &adjust_cld_closure); + } gch->gen_process_weak_roots(&adjust_pointer_closure); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp --- a/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -161,7 +161,7 @@ } _debug_perturbation = !_debug_perturbation; } - assert((new_active_workers <= (uintx) ParallelGCThreads) && + assert((new_active_workers <= ParallelGCThreads) && (new_active_workers >= min_workers), "Jiggled active workers too much"); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/cardGeneration.cpp --- a/hotspot/src/share/vm/gc/shared/cardGeneration.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/cardGeneration.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -353,8 +353,8 @@ blk->do_space(space()); } -void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) { +void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk, uint n_threads) { blk->set_generation(this); - younger_refs_in_space_iterate(space(), blk); + younger_refs_in_space_iterate(space(), blk, n_threads); blk->reset_generation(); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/cardGeneration.hpp --- a/hotspot/src/share/vm/gc/shared/cardGeneration.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/cardGeneration.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -89,7 +89,7 @@ void space_iterate(SpaceClosure* blk, bool usedOnly = false); - void younger_refs_iterate(OopsInGenClosure* blk); + void younger_refs_iterate(OopsInGenClosure* blk, uint n_threads); bool is_in(const void* p) const; diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp --- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -440,31 +440,11 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, OopsInGenClosure* cl, - CardTableRS* ct) { + CardTableRS* ct, + uint n_threads) { if (!mr.is_empty()) { - // Caller (process_roots()) claims that all GC threads - // execute this call. With UseDynamicNumberOfGCThreads now all - // active GC threads execute this call. 
The number of active GC - // threads needs to be passed to par_non_clean_card_iterate_work() - // to get proper partitioning and termination. - // - // This is an example of where n_par_threads() is used instead - // of workers()->active_workers(). n_par_threads can be set to 0 to - // turn off parallelism. For example when this code is called as - // part of verification during root processing then n_par_threads() - // may have been set to 0. active_workers is not overloaded with - // the meaning that it is a switch to disable parallelism and so keeps - // the meaning of the number of active gc workers. If parallelism has - // not been shut off by setting n_par_threads to 0, then n_par_threads - // should be equal to active_workers. When a different mechanism for - // shutting off parallelism is used, then active_workers can be used in - // place of n_par_threads. - int n_threads = GenCollectedHeap::heap()->n_par_threads(); - bool is_par = n_threads > 0; - if (is_par) { + if (n_threads > 0) { #if INCLUDE_ALL_GCS - assert(GenCollectedHeap::heap()->n_par_threads() == - GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); #else // INCLUDE_ALL_GCS fatal("Parallel gc not supported here."); @@ -472,8 +452,11 @@ } else { // clear_cl finds contiguous dirty ranges of cards to process and clear. - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary()); - ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); + // This is the single-threaded version used by DefNew. + const bool parallel = false; + + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); clear_cl.do_MemRegion(mr); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp --- a/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/cardTableModRefBS.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -178,14 +178,15 @@ // region mr in the given space and apply cl to any dirty sub-regions // of mr. Clears the dirty cards as they are processed. void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, - OopsInGenClosure* cl, CardTableRS* ct); + OopsInGenClosure* cl, CardTableRS* ct, + uint n_threads); private: // Work method used to implement non_clean_card_iterate_possibly_parallel() // above in the parallel case. 
void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, OopsInGenClosure* cl, CardTableRS* ct, - int n_threads); + uint n_threads); protected: // Dirty the bytes corresponding to "mr" (not all of which must be diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/cardTableRS.cpp --- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -102,9 +102,10 @@ } void CardTableRS::younger_refs_iterate(Generation* g, - OopsInGenClosure* blk) { + OopsInGenClosure* blk, + uint n_threads) { _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val(); - g->younger_refs_iterate(blk); + g->younger_refs_iterate(blk, n_threads); } inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) { @@ -164,15 +165,8 @@ } ClearNoncleanCardWrapper::ClearNoncleanCardWrapper( - DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) : - _dirty_card_closure(dirty_card_closure), _ct(ct) { - // Cannot yet substitute active_workers for n_par_threads - // in the case where parallelism is being turned off by - // setting n_par_threads to 0. - _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0); - assert(!_is_par || - (GenCollectedHeap::heap()->n_par_threads() == - GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch"); + DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) : + _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) { } bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) { @@ -272,7 +266,8 @@ } void CardTableRS::younger_refs_in_space_iterate(Space* sp, - OopsInGenClosure* cl) { + OopsInGenClosure* cl, + uint n_threads) { const MemRegion urasm = sp->used_region_at_save_marks(); #ifdef ASSERT // Convert the assertion check to a warning if we are running @@ -301,7 +296,7 @@ ShouldNotReachHere(); } #endif - _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this); + _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads); } void CardTableRS::clear_into_younger(Generation* old_gen) { diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/cardTableRS.hpp --- a/hotspot/src/share/vm/gc/shared/cardTableRS.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/cardTableRS.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -56,7 +56,7 @@ CardTableModRefBSForCTRS* _ct_bs; - virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl); + virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads); void verify_space(Space* s, HeapWord* gen_start); @@ -116,7 +116,7 @@ // Card table entries are cleared before application; "blk" is // responsible for dirtying if the oop is still older-to-younger after // closure application. 
- void younger_refs_iterate(Generation* g, OopsInGenClosure* blk); + void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads); void inline_write_ref_field_gc(void* field, oop new_val) { jbyte* byte = _ct_bs->byte_for(field); @@ -183,7 +183,7 @@ bool is_word_aligned(jbyte* entry); public: - ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct); + ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par); void do_MemRegion(MemRegion mr); }; diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/collectedHeap.cpp --- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -160,8 +160,7 @@ // Memory state functions. -CollectedHeap::CollectedHeap() : _n_par_threads(0) -{ +CollectedHeap::CollectedHeap() { const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); const size_t elements_per_word = HeapWordSize / sizeof(jint); _filler_array_max_size = align_object_size(filler_array_hdr_size() + diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/collectedHeap.hpp --- a/hotspot/src/share/vm/gc/shared/collectedHeap.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/collectedHeap.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -101,7 +101,6 @@ protected: BarrierSet* _barrier_set; bool _is_gc_active; - uint _n_par_threads; unsigned int _total_collections; // ... started unsigned int _total_full_collections; // ... started @@ -291,12 +290,6 @@ } GCCause::Cause gc_cause() { return _gc_cause; } - // Number of threads currently working on GC tasks. - uint n_par_threads() { return _n_par_threads; } - - // May be overridden to set additional parallelism. - virtual void set_par_threads(uint t) { _n_par_threads = t; }; - // General obj/array allocation facilities. inline static oop obj_allocate(KlassHandle klass, int size, TRAPS); inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp --- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -561,16 +561,6 @@ return collector_policy()->satisfy_failed_allocation(size, is_tlab); } -void GenCollectedHeap::set_par_threads(uint t) { - assert(t == 0 || !UseSerialGC, "Cannot have parallel threads"); - CollectedHeap::set_par_threads(t); - set_n_termination(t); -} - -void GenCollectedHeap::set_n_termination(uint t) { - _process_strong_tasks->set_n_threads(t); -} - #ifdef ASSERT class AssertNonScavengableClosure: public OopClosure { public: @@ -582,15 +572,13 @@ static AssertNonScavengableClosure assert_is_non_scavengable_closure; #endif -void GenCollectedHeap::process_roots(bool activate_scope, +void GenCollectedHeap::process_roots(StrongRootsScope* scope, ScanningOption so, OopClosure* strong_roots, OopClosure* weak_roots, CLDClosure* strong_cld_closure, CLDClosure* weak_cld_closure, CodeBlobClosure* code_roots) { - StrongRootsScope srs(activate_scope); - // General roots. 
assert(Threads::thread_claim_parity() != 0, "must have called prologue code"); assert(code_roots != NULL, "code root closure should always be set"); @@ -609,7 +597,7 @@ // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots; - bool is_par = n_par_threads() > 0; + bool is_par = scope->n_threads() > 1; Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p); if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) { @@ -669,9 +657,9 @@ } -void GenCollectedHeap::gen_process_roots(int level, +void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope, + int level, bool younger_gens_as_roots, - bool activate_scope, ScanningOption so, bool only_strong_roots, OopsInGenClosure* not_older_gens, @@ -689,7 +677,7 @@ OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens; CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure; - process_roots(activate_scope, so, + process_roots(scope, so, not_older_gens, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure); @@ -707,11 +695,11 @@ // older-gen scanning. if (level == 0) { older_gens->set_generation(_old_gen); - rem_set()->younger_refs_iterate(_old_gen, older_gens); + rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads()); older_gens->reset_generation(); } - _process_strong_tasks->all_tasks_completed(); + _process_strong_tasks->all_tasks_completed(scope->n_threads()); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp --- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -30,8 +30,9 @@ #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/generation.hpp" +class FlexibleWorkGang; +class StrongRootsScope; class SubTasksDone; -class FlexibleWorkGang; // A "GenCollectedHeap" is a CollectedHeap that uses generational // collection. It has two generations, young and old. @@ -363,9 +364,6 @@ // asserted to be this type. static GenCollectedHeap* heap(); - void set_par_threads(uint t); - void set_n_termination(uint t); - // Invoke the "do_oop" method of one of the closures "not_older_gens" // or "older_gens" on root locations for the generation at // "level". 
(The "older_gens" closure is used for scanning references @@ -385,7 +383,7 @@ }; private: - void process_roots(bool activate_scope, + void process_roots(StrongRootsScope* scope, ScanningOption so, OopClosure* strong_roots, OopClosure* weak_roots, @@ -393,24 +391,13 @@ CLDClosure* weak_cld_closure, CodeBlobClosure* code_roots); - void gen_process_roots(int level, - bool younger_gens_as_roots, - bool activate_scope, - ScanningOption so, - OopsInGenClosure* not_older_gens, - OopsInGenClosure* weak_roots, - OopsInGenClosure* older_gens, - CLDClosure* cld_closure, - CLDClosure* weak_cld_closure, - CodeBlobClosure* code_closure); - public: static const bool StrongAndWeakRoots = false; static const bool StrongRootsOnly = true; - void gen_process_roots(int level, + void gen_process_roots(StrongRootsScope* scope, + int level, bool younger_gens_as_roots, - bool activate_scope, ScanningOption so, bool only_strong_roots, OopsInGenClosure* not_older_gens, diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/genOopClosures.hpp --- a/hotspot/src/share/vm/gc/shared/genOopClosures.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/genOopClosures.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -35,11 +35,6 @@ class DefNewGeneration; class KlassRemSet; -template class GenericTaskQueue; -typedef GenericTaskQueue OopTaskQueue; -template class GenericTaskQueueSet; -typedef GenericTaskQueueSet OopTaskQueueSet; - // Closure for iterating roots from a particular generation // Note: all classes deriving from this MUST call this do_barrier // method at the end of their own do_oop method! diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/genRemSet.hpp --- a/hotspot/src/share/vm/gc/shared/genRemSet.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/genRemSet.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -77,10 +77,11 @@ // 1) that are in objects allocated in "g" at the time of the last call // to "save_Marks", and // 2) that point to objects in younger generations. - virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0; + virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0; virtual void younger_refs_in_space_iterate(Space* sp, - OopsInGenClosure* cl) = 0; + OopsInGenClosure* cl, + uint n_threads) = 0; // This method is used to notify the remembered set that "new_val" has // been written into "field" by the garbage collector. diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/generation.cpp --- a/hotspot/src/share/vm/gc/shared/generation.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/generation.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -293,9 +293,10 @@ } void Generation::younger_refs_in_space_iterate(Space* sp, - OopsInGenClosure* cl) { + OopsInGenClosure* cl, + uint n_threads) { GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); - rs->younger_refs_in_space_iterate(sp, cl); + rs->younger_refs_in_space_iterate(sp, cl, n_threads); } class GenerationObjIterateClosure : public SpaceClosure { diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/generation.hpp --- a/hotspot/src/share/vm/gc/shared/generation.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/generation.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -122,7 +122,7 @@ // The iteration is only over objects allocated at the start of the // iterations; objects allocated as a result of applying the closure are // not included. 
- void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl); + void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads); public: // The set of possible generation kinds. @@ -526,7 +526,7 @@ // in the current generation that contain pointers to objects in younger // generations. Objects allocated since the last "save_marks" call are // excluded. - virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0; + virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0; // Inform a generation that it longer contains references to objects // in any younger generation. [e.g. Because younger gens are empty, diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/space.cpp --- a/hotspot/src/share/vm/gc/shared/space.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/space.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -181,7 +181,8 @@ DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary) { + HeapWord* boundary, + bool parallel) { return new DirtyCardToOopClosure(this, cl, precision, boundary); } @@ -260,7 +261,8 @@ DirtyCardToOopClosure* ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary) { + HeapWord* boundary, + bool parallel) { return new ContiguousSpaceDCTOC(this, cl, precision, boundary); } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/space.hpp --- a/hotspot/src/share/vm/gc/shared/space.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/space.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -183,7 +183,8 @@ // operate. ResourceArea allocated. virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary = NULL); + HeapWord* boundary, + bool parallel); // If "p" is in the space, returns the address of the start of the // "block" that contains "p". We say "block" instead of "object" since @@ -629,7 +630,8 @@ // Override. 
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, - HeapWord* boundary = NULL); + HeapWord* boundary, + bool parallel); // Apply "blk->do_oop" to the addresses of all reference fields in objects // starting with the _saved_mark_word, which was noted during a generation's diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/strongRootsScope.cpp --- a/hotspot/src/share/vm/gc/shared/strongRootsScope.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/strongRootsScope.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -28,24 +28,18 @@ #include "gc/shared/strongRootsScope.hpp" #include "runtime/thread.hpp" -MarkScope::MarkScope(bool activate) : _active(activate) { - if (_active) { - nmethod::oops_do_marking_prologue(); - } +MarkScope::MarkScope() { + nmethod::oops_do_marking_prologue(); } MarkScope::~MarkScope() { - if (_active) { - nmethod::oops_do_marking_epilogue(); - } + nmethod::oops_do_marking_epilogue(); } -StrongRootsScope::StrongRootsScope(bool activate) : MarkScope(activate) { - if (_active) { - Threads::change_thread_claim_parity(); - // Zero the claimed high water mark in the StringTable - StringTable::clear_parallel_claimed_index(); - } +StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) { + Threads::change_thread_claim_parity(); + // Zero the claimed high water mark in the StringTable + StringTable::clear_parallel_claimed_index(); } StrongRootsScope::~StrongRootsScope() { diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/strongRootsScope.hpp --- a/hotspot/src/share/vm/gc/shared/strongRootsScope.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/strongRootsScope.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -29,18 +29,21 @@ class MarkScope : public StackObj { protected: - bool _active; - public: - MarkScope(bool activate = true); + MarkScope(); ~MarkScope(); }; // Sets up and tears down the required state for parallel root processing. class StrongRootsScope : public MarkScope { + // Number of threads participating in the roots processing. + const uint _n_threads; + public: - StrongRootsScope(bool activate = true); + StrongRootsScope(uint n_threads); ~StrongRootsScope(); + + uint n_threads() const { return _n_threads; } }; #endif // SHARE_VM_GC_SHARED_STRONGROOTSSCOPE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/taskqueue.hpp --- a/hotspot/src/share/vm/gc/shared/taskqueue.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/taskqueue.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -382,6 +382,8 @@ bool steal(uint queue_num, int* seed, E& t); bool peek(); + + uint size() const { return _n; } }; template void diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/workgroup.cpp --- a/hotspot/src/share/vm/gc/shared/workgroup.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -133,8 +133,6 @@ } void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) { - task->set_for_termination(no_of_parallel_workers); - // This thread is executed by the VM thread which does not block // on ordinary MutexLocker's. MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag); @@ -434,7 +432,7 @@ // SubTasksDone functions. 
SubTasksDone::SubTasksDone(uint n) : - _n_tasks(n), _n_threads(1), _tasks(NULL) { + _n_tasks(n), _tasks(NULL) { _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal); guarantee(_tasks != NULL, "alloc failure"); clear(); @@ -444,12 +442,6 @@ return _tasks != NULL; } -void SubTasksDone::set_n_threads(uint t) { - assert(_claimed == 0 || _threads_completed == _n_threads, - "should not be called while tasks are being processed!"); - _n_threads = (t == 0 ? 1 : t); -} - void SubTasksDone::clear() { for (uint i = 0; i < _n_tasks; i++) { _tasks[i] = 0; @@ -477,7 +469,7 @@ return res; } -void SubTasksDone::all_tasks_completed() { +void SubTasksDone::all_tasks_completed(uint n_threads) { jint observed = _threads_completed; jint old; do { @@ -485,7 +477,10 @@ observed = Atomic::cmpxchg(old+1, &_threads_completed, old); } while (observed != old); // If this was the last thread checking in, clear the tasks. - if (observed+1 == (jint)_n_threads) clear(); + uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads); + if (observed + 1 == (jint)adjusted_thread_count) { + clear(); + } } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/gc/shared/workgroup.hpp --- a/hotspot/src/share/vm/gc/shared/workgroup.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/gc/shared/workgroup.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -59,13 +59,6 @@ // The argument tells you which member of the gang you are. virtual void work(uint worker_id) = 0; - // This method configures the task for proper termination. - // Some tasks do not have any requirements on termination - // and may inherit this method that does nothing. Some - // tasks do some coordination on termination and override - // this method to implement that coordination. - virtual void set_for_termination(uint active_workers) {}; - // Debugging accessor for the name. const char* name() const PRODUCT_RETURN_(return NULL;); int counter() { return _counter; } @@ -99,12 +92,9 @@ OopTaskQueueSet* _queues; ParallelTaskTerminator _terminator; public: - AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues) : - AbstractGangTask(name), _queues(queues), _terminator(0, _queues) {} + AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) : + AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {} ParallelTaskTerminator* terminator() { return &_terminator; } - virtual void set_for_termination(uint active_workers) { - terminator()->reset_for_reuse(active_workers); - } OopTaskQueueSet* queues() { return _queues; } }; @@ -315,16 +305,20 @@ uint _active_workers; public: // Constructor and destructor. - // Initialize active_workers to a minimum value. Setting it to - // the parameter "workers" will initialize it to a maximum - // value which is not desirable. FlexibleWorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads) : WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads), - _active_workers(UseDynamicNumberOfGCThreads ? 1U : ParallelGCThreads) {} - // Accessors for fields - virtual uint active_workers() const { return _active_workers; } + _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers) {} + + // Accessors for fields. 
+ virtual uint active_workers() const { + assert(_active_workers <= _total_workers, + err_msg("_active_workers: %u > _total_workers: %u", _active_workers, _total_workers)); + assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers, + "Unless dynamic should use total workers"); + return _active_workers; + } void set_active_workers(uint v) { assert(v <= _total_workers, "Trying to set more workers active than there are"); @@ -390,12 +384,6 @@ class SubTasksDone: public CHeapObj { uint* _tasks; uint _n_tasks; - // _n_threads is used to determine when a sub task is done. - // It does not control how many threads will execute the subtask - // but must be initialized to the number that do execute the task - // in order to correctly decide when the subtask is done (all the - // threads working on the task have finished). - uint _n_threads; uint _threads_completed; #ifdef ASSERT volatile uint _claimed; @@ -413,11 +401,6 @@ // True iff the object is in a valid state. bool valid(); - // Get/set the number of parallel threads doing the tasks to "t". Can only - // be called before tasks start or after they are complete. - uint n_threads() { return _n_threads; } - void set_n_threads(uint t); - // Returns "false" if the task "t" is unclaimed, and ensures that task is // claimed. The task "t" is required to be within the range of "this". bool is_task_claimed(uint t); @@ -426,7 +409,9 @@ // tasks that it will try to claim. Every thread in the parallel task // must execute this. (When the last thread does so, the task array is // cleared.) - void all_tasks_completed(); + // + // n_threads - Number of threads executing the sub-tasks. + void all_tasks_completed(uint n_threads); // Destructor. ~SubTasksDone(); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/memory/iterator.hpp --- a/hotspot/src/share/vm/memory/iterator.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/memory/iterator.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -381,9 +381,4 @@ template static bool do_metadata(OopClosureType* closure); }; -// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions. -#define nvs_nv_to_bool true -#define nvs_v_to_bool false -#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool - #endif // SHARE_VM_MEMORY_ITERATOR_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/arrayKlass.hpp --- a/hotspot/src/share/vm/oops/arrayKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/arrayKlass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -144,4 +144,36 @@ void oop_verify_on(oop obj, outputStream* st); }; +// Array oop iteration macros for declarations. +// Used to generate the declarations in the *ArrayKlass header files. + +#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix) \ + int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end); + +#if INCLUDE_ALL_GCS +// Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below. +#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix) \ + int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure); +#endif // INCLUDE_ALL_GCS + + +// Array oop iteration macros for definitions. +// Used to generate the definitions in the *ArrayKlass.inline.hpp files. 
+ +#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix) \ + \ +int KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \ + return oop_oop_iterate_range(obj, closure, start, end); \ +} + +#if INCLUDE_ALL_GCS +#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix) \ +int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ + /* No reverse implementation ATM. */ \ + return oop_oop_iterate(obj, closure); \ +} +#else +#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix) +#endif + #endif // SHARE_VM_OOPS_ARRAYKLASS_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp --- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -87,19 +87,12 @@ public: -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL) #if INCLUDE_ALL_GCS -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS) #endif // INCLUDE_ALL_GCS }; diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp --- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -78,33 +78,9 @@ return size; } - -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate(obj, closure); \ -} - -#if INCLUDE_ALL_GCS -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceClassLoaderKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate_reverse(obj, closure); \ -} -#else -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) -#endif - - -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ - return oop_oop_iterate_bounded(obj, closure, mr); \ -} - #define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ - InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ - InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, 
nv_suffix) + OOP_OOP_ITERATE_DEFN( InstanceClassLoaderKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceClassLoaderKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceClassLoaderKlass, OopClosureType, nv_suffix) #endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceKlass.hpp --- a/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -1084,19 +1084,12 @@ public: -#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL) #if INCLUDE_ALL_GCS -#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS) #endif // INCLUDE_ALL_GCS u2 idnum_allocated_count() const { return _idnum_allocated_count; } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceKlass.inline.hpp --- a/hotspot/src/share/vm/oops/instanceKlass.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceKlass.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -27,6 +27,7 @@ #include "memory/iterator.hpp" #include "oops/instanceKlass.hpp" +#include "oops/klass.hpp" #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -187,29 +188,9 @@ #undef INLINE - -#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ -int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate(obj, closure); \ -} - -#if INCLUDE_ALL_GCS -#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ -int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate_reverse(obj, closure); \ -} -#else -#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) -#endif - -#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ -int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ - return oop_oop_iterate_bounded(obj, closure, mr); \ -} - #define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - InstanceKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ - InstanceKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ - InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) + OOP_OOP_ITERATE_DEFN( InstanceKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix) #endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 
hotspot/src/share/vm/oops/instanceMirrorKlass.hpp --- a/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -149,19 +149,12 @@ public: -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL) #if INCLUDE_ALL_GCS -#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS) #endif // INCLUDE_ALL_GCS }; diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp --- a/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -27,6 +27,7 @@ #include "classfile/javaClasses.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.hpp" +#include "oops/klass.hpp" #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -132,33 +133,9 @@ return oop_size(obj); } - -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate(obj, closure); \ -} - -#if INCLUDE_ALL_GCS -#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate_reverse(obj, closure); \ -} -#else -#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) -#endif - - -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ - return oop_oop_iterate_bounded(obj, closure, mr); \ -} - #define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ - InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ - InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) + OOP_OOP_ITERATE_DEFN( InstanceMirrorKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceMirrorKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceMirrorKlass, OopClosureType, nv_suffix) #endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceRefKlass.hpp --- a/hotspot/src/share/vm/oops/instanceRefKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceRefKlass.hpp Wed Jul 05 20:37:12 2017 
+0200 @@ -119,19 +119,12 @@ public: -#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL) #if INCLUDE_ALL_GCS -#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS) #endif // INCLUDE_ALL_GCS static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp --- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -141,34 +141,9 @@ // Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for // all closures. Macros calling macros above for each oop size. - -#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceRefKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate(obj, closure); \ -} - -#if INCLUDE_ALL_GCS -#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceRefKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate_reverse(obj, closure); \ -} -#else -#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) -#endif - - -#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceRefKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ - return oop_oop_iterate_bounded(obj, closure, mr); \ -} - #define ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - InstanceRefKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ - InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ - InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) - + OOP_OOP_ITERATE_DEFN( InstanceRefKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceRefKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceRefKlass, OopClosureType, nv_suffix) #endif // SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/klass.hpp --- a/hotspot/src/share/vm/oops/klass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/klass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -583,20 +583,20 @@ // Iterators specialized to particular subtypes // of ExtendedOopClosure, to avoid closure virtual calls. -#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0; \ - /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". 
*/ \ - virtual int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) = 0; +#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ + virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0; \ + /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */ \ + virtual int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0; ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL) ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL) #if INCLUDE_ALL_GCS -#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ +#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix) \ virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0; - ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS) #endif // INCLUDE_ALL_GCS virtual void array_klasses_do(void f(Klass* k)) {} @@ -651,4 +651,44 @@ void klass_update_barrier_set_pre(oop* p, oop v); }; +// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions. +#define nvs_nv_to_bool true +#define nvs_v_to_bool false +#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool + +// Oop iteration macros for declarations. +// Used to generate declarations in the *Klass header files. + +#define OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ + int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \ + int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr); + +#if INCLUDE_ALL_GCS +#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix) \ + int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure); +#endif // INCLUDE_ALL_GCS + + +// Oop iteration macros for definitions. +// Used to generate definitions in the *Klass.inline.hpp files. 
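For orientation, a minimal sketch (not part of this changeset) of how these generator macros are meant to be used, assuming a hypothetical subclass FooKlass and a closure type SomeOopClosure registered with the non-virtual suffix _nv; the real consumers in this patch are InstanceMirrorKlass, InstanceRefKlass, ObjArrayKlass and TypeArrayKlass as shown in the hunks above.

// fooKlass.hpp (hypothetical): the header only invokes the declaration macros.
//   ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
//   ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
// For one registered (SomeOopClosure, _nv) pair, OOP_OOP_ITERATE_DECL expands to:
//   int oop_oop_iterate_nv(oop obj, SomeOopClosure* closure);
//   int oop_oop_iterate_bounded_nv(oop obj, SomeOopClosure* closure, MemRegion mr);
// fooKlass.inline.hpp (hypothetical): the definition macros below generate the matching
// bodies, each forwarding to the klass's own implementation, e.g.
//   OOP_OOP_ITERATE_DEFN(FooKlass, SomeOopClosure, _nv)
// expands to:
//   int FooKlass::oop_oop_iterate_nv(oop obj, SomeOopClosure* closure) {
//     return oop_oop_iterate(obj, closure);
//   }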
+ +#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix) \ +int KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ + return oop_oop_iterate(obj, closure); \ +} + +#if INCLUDE_ALL_GCS +#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix) \ +int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ + return oop_oop_iterate_reverse(obj, closure); \ +} +#else +#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix) +#endif + +#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix) \ +int KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) { \ + return oop_oop_iterate_bounded(obj, closure, mr); \ +} + #endif // SHARE_VM_OOPS_KLASS_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/objArrayKlass.hpp --- a/hotspot/src/share/vm/oops/objArrayKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -163,22 +163,14 @@ public: -#define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \ - MemRegion mr); \ - int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk, \ - int start, int end); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE) #if INCLUDE_ALL_GCS -#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS) #endif // INCLUDE_ALL_GCS // JVM support diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/objArrayKlass.inline.hpp --- a/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -27,6 +27,8 @@ #include "memory/memRegion.hpp" #include "memory/iterator.inline.hpp" +#include "oops/arrayKlass.hpp" +#include "oops/klass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" @@ -149,41 +151,10 @@ return size; } - -#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate(obj, closure); \ -} - -#if INCLUDE_ALL_GCS -#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ -int ObjArrayKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - /* No reverse implementation ATM. 
*/ \ - return oop_oop_iterate(obj, closure); \ -} -#else -#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) -#endif - -#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ - return oop_oop_iterate_bounded(obj, closure, mr); \ -} - -#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \ - \ -int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \ - return oop_oop_iterate_range(obj, closure, start, end); \ -} - - -#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - ObjArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ - ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ - ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r( OopClosureType, nv_suffix) - +#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN( ObjArrayKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BOUNDED( ObjArrayKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_RANGE( ObjArrayKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(ObjArrayKlass, OopClosureType, nv_suffix) #endif // SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/oop.inline.hpp --- a/hotspot/src/share/vm/oops/oop.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/oop.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -741,7 +741,7 @@ } \ \ inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \ - return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ + return klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr); \ } diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/typeArrayKlass.hpp --- a/hotspot/src/share/vm/oops/typeArrayKlass.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/typeArrayKlass.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -92,24 +92,24 @@ // The implementation used by all oop_oop_iterate functions in TypeArrayKlasses. inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure); + // Wraps oop_oop_iterate_impl to conform to macros. + template + inline int oop_oop_iterate(oop obj, OopClosureType* closure); + + // Wraps oop_oop_iterate_impl to conform to macros. 
+ template + inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr); + public: -#define TypeArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, \ - MemRegion mr); \ - int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, \ - int start, int end); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE) #if INCLUDE_ALL_GCS -#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS) + ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS) #endif // INCLUDE_ALL_GCS diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp --- a/hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp Wed Jul 05 20:37:12 2017 +0200 @@ -25,6 +25,8 @@ #ifndef SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP #define SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP +#include "oops/arrayKlass.hpp" +#include "oops/klass.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayKlass.hpp" #include "oops/typeArrayOop.hpp" @@ -39,35 +41,19 @@ return t->object_size(); } -#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int TypeArrayKlass:: \ -oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate_impl(obj, closure); \ +template +int TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { + return oop_oop_iterate_impl(obj, closure); } -#if INCLUDE_ALL_GCS -#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int TypeArrayKlass:: \ -oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - return oop_oop_iterate_impl(obj, closure); \ -} -#else -#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) -#endif - - -#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int TypeArrayKlass:: \ -oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ - return oop_oop_iterate_impl(obj, closure); \ +template +int TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) { + return oop_oop_iterate_impl(obj, closure); } -#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - TypeArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ - TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ - TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) +#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN( TypeArrayKlass, OopClosureType, nv_suffix) \ + OOP_OOP_ITERATE_DEFN_BOUNDED( TypeArrayKlass, OopClosureType, nv_suffix) \ + 
OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(TypeArrayKlass, OopClosureType, nv_suffix) #endif // SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/runtime/arguments.cpp --- a/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -1278,10 +1278,8 @@ // Preferred young gen size for "short" pauses: // upper bound depends on # of threads and NewRatio. - const uintx parallel_gc_threads = - (ParallelGCThreads == 0 ? 1 : ParallelGCThreads); const size_t preferred_max_new_size_unaligned = - MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads)); + MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads)); size_t preferred_max_new_size = align_size_up(preferred_max_new_size_unaligned, os::vm_page_size()); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/src/share/vm/utilities/elfFile.cpp --- a/hotspot/src/share/vm/utilities/elfFile.cpp Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/src/share/vm/utilities/elfFile.cpp Wed Jul 05 20:37:12 2017 +0200 @@ -261,7 +261,12 @@ } } } +// AARCH64 defaults to noexecstack. All others default to execstack. +#ifdef AARCH64 + return true; +#else return false; +#endif } #endif diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/test/compiler/stable/StableConfiguration.java --- a/hotspot/test/compiler/stable/StableConfiguration.java Wed Jul 05 20:36:16 2017 +0200 +++ b/hotspot/test/compiler/stable/StableConfiguration.java Wed Jul 05 20:37:12 2017 +0200 @@ -41,10 +41,32 @@ System.out.println("Server Compiler: " + get()); } + // The method 'get' below returns true if the method is server compiled + // and is used by the Stable tests to determine whether methods in + // general are being server compiled or not as the -XX:+FoldStableValues + // option is only applicable to -server. + // + // On aarch64 we DeOptimize when patching. This means that when the + // method is compiled as a result of -Xcomp it DeOptimizes immediately. + // The result is that getMethodCompilationLevel returns 0. This means + // the method returns true based on java.vm.name. + // + // However when the tests are run with -XX:+TieredCompilation and + // -XX:TieredStopAtLevel=1 this fails because methods will always + // be client compiled. + // + // Solution is to add a simple method 'get1' which should never be + // DeOpted and use that to determine the compilation level instead. + static void get1() { + } + + + // ::get() is among immediately compiled methods. static boolean get() { try { - Method m = StableConfiguration.class.getDeclaredMethod("get"); + get1(); + Method m = StableConfiguration.class.getDeclaredMethod("get1"); int level = WB.getMethodCompilationLevel(m); if (level > 0) { return (level == 4); diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/test/serviceability/sa/TestClassLoaderStats.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/serviceability/sa/TestClassLoaderStats.java Wed Jul 05 20:37:12 2017 +0200 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import jdk.test.lib.Platform; +import jdk.test.lib.ProcessTools; +import jdk.test.lib.OutputAnalyzer; + +/* + * @test + * @library /testlibrary + * @build jdk.test.lib.* + * @run main TestClassLoaderStats + */ +public class TestClassLoaderStats { + + public static void main(String[] args) throws Exception { + if (!Platform.shouldSAAttach()) { + System.out.println("SA attach not expected to work - test skipped."); + return; + } + + ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder( + "-XX:+UsePerfData", + "sun.jvm.hotspot.tools.ClassLoaderStats", + Integer.toString(ProcessTools.getProcessId())); + OutputAnalyzer output = ProcessTools.executeProcess(processBuilder); + System.out.println(output.getOutput()); + + output.shouldHaveExitValue(0); + output.shouldContain("Debugger attached successfully."); + // The class loader stats header needs to be presented in the output: + output.shouldMatch("class_loader\\W+classes\\W+bytes\\W+parent_loader\\W+alive?\\W+type"); + output.stderrShouldNotMatch("[E|e]xception"); + output.stderrShouldNotMatch("[E|e]rror"); + } + +} diff -r 1c0a1cee6054 -r 5b500c93ce48 hotspot/test/serviceability/sa/TestStackTrace.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/serviceability/sa/TestStackTrace.java Wed Jul 05 20:37:12 2017 +0200 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +import jdk.test.lib.OutputAnalyzer; +import jdk.test.lib.Platform; +import jdk.test.lib.ProcessTools; + +/* + * @test + * @library /testlibrary + * @build jdk.test.lib.* + * @run main TestStackTrace + */ +public class TestStackTrace { + + public static void main(String[] args) throws Exception { + if (!Platform.shouldSAAttach()) { + System.out.println("SA attach not expected to work - test skipped."); + return; + } + + ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder( + "-XX:+UsePerfData", + "sun.jvm.hotspot.tools.StackTrace", + Integer.toString(ProcessTools.getProcessId())); + OutputAnalyzer output = ProcessTools.executeProcess(processBuilder); + System.out.println(output.getOutput()); + + output.shouldHaveExitValue(0); + output.shouldContain("Debugger attached successfully."); + output.stderrShouldNotMatch("[E|e]xception"); + output.stderrShouldNotMatch("[E|e]rror"); + } + +} diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/.hgtags --- a/jaxp/.hgtags Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxp/.hgtags Wed Jul 05 20:37:12 2017 +0200 @@ -309,3 +309,4 @@ 6f91749b5aaef1a171ec2254163233438d1071d1 jdk9-b64 ae7406e82828fe1c245ac7507a9da5fd5b1c9529 jdk9-b65 d5963ccce28d7a3e96ee3e2dc8a8676e61699b70 jdk9-b66 +78c2685daabafae827c686ca2d1bb2e451faed2b jdk9-b67 diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/src/java.xml/share/classes/com/sun/org/apache/xml/internal/dtm/ref/DTMNodeProxy.java --- a/jaxp/src/java.xml/share/classes/com/sun/org/apache/xml/internal/dtm/ref/DTMNodeProxy.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxp/src/java.xml/share/classes/com/sun/org/apache/xml/internal/dtm/ref/DTMNodeProxy.java Wed Jul 05 20:37:12 2017 +0200 @@ -2116,7 +2116,7 @@ */ @Override public String getTextContent() throws DOMException { - return getNodeValue(); // overriden in some subclasses + return dtm.getStringValue(node).toString(); } /** diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/src/java.xml/share/classes/com/sun/org/apache/xml/internal/dtm/ref/sax2dtm/SAX2DTM2.java --- a/jaxp/src/java.xml/share/classes/com/sun/org/apache/xml/internal/dtm/ref/sax2dtm/SAX2DTM2.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxp/src/java.xml/share/classes/com/sun/org/apache/xml/internal/dtm/ref/sax2dtm/SAX2DTM2.java Wed Jul 05 20:37:12 2017 +0200 @@ -3145,11 +3145,7 @@ m_data.elementAt(-dataIndex+1)); } } - else if (DTM.ELEMENT_NODE == type) - { - return getStringValueX(nodeHandle); - } - else if (DTM.DOCUMENT_FRAGMENT_NODE == type + else if (DTM.ELEMENT_NODE == type || DTM.DOCUMENT_FRAGMENT_NODE == type || DTM.DOCUMENT_NODE == type) { return null; diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/test/ProblemList.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/jaxp/test/ProblemList.txt Wed Jul 05 20:37:12 2017 +0200 @@ -0,0 +1,26 @@ +########################################################################### +# +# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). 
+# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +########################################################################### + +# No jaxp tests are on the problem list. diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/test/TEST.ROOT --- a/jaxp/test/TEST.ROOT Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxp/test/TEST.ROOT Wed Jul 05 20:37:12 2017 +0200 @@ -1,8 +1,21 @@ # This file identifies the root of the test-suite hierarchy. # It also contains test-suite configuration information. +# The list of keywords supported in the entire test suite. The +# "intermittent" keyword marks tests known to fail intermittently. +# The "randomness" keyword marks tests using randomness with test +# cases differing from run to run. (A test using a fixed random seed +# would not count as "randomness" by this definition.) Extra care +# should be taken to handle test failures of intermittent or +# randomness tests. + +keys=intermittent randomness + # Tests that must run in othervm mode othervm.dirs=javax/xml/jaxp # Group definitions groups=TEST.groups + +# Minimum jtreg version +requiredVersion=4.1 b11 diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/test/TEST.groups --- a/jaxp/test/TEST.groups Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxp/test/TEST.groups Wed Jul 05 20:37:12 2017 +0200 @@ -1,4 +1,4 @@ -# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -20,5 +20,14 @@ # questions. # +# Tiered testing definitions + +# No jaxp tests are tier 1. +tier1 = + +# All jaxp tests are tier 2. +tier2 = \ + :jaxp_all + jaxp_all = \ javax/xml/jaxp diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxp/test/javax/xml/jaxp/unittest/javax/xml/transform/DocumentExtFunc.java --- a/jaxp/test/javax/xml/jaxp/unittest/javax/xml/transform/DocumentExtFunc.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxp/test/javax/xml/jaxp/unittest/javax/xml/transform/DocumentExtFunc.java Wed Jul 05 20:37:12 2017 +0200 @@ -30,6 +30,6 @@ public static String test(NodeList list) { Node node = list.item(0); - return "["+node.getNodeName() + ":" + node.getNodeValue()+"]"; + return "["+node.getNodeName() + ":" + node.getTextContent()+"]"; } } diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/.hgtags --- a/jaxws/.hgtags Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/.hgtags Wed Jul 05 20:37:12 2017 +0200 @@ -312,3 +312,4 @@ df100399ed27d0eaa57c137ca99819a0fee66178 jdk9-b64 45ef73bb85c12ec1b291835c1d40e342a454e3f0 jdk9-b65 1232f4013417e4a9cd291096798d10f2e601d69d jdk9-b66 +c9785bc8ade98a16a050d7520b70c68363857e00 jdk9-b67 diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/src/java.xml.ws/share/classes/com/sun/xml/internal/ws/assembler/MetroConfigLoader.java --- a/jaxws/src/java.xml.ws/share/classes/com/sun/xml/internal/ws/assembler/MetroConfigLoader.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/src/java.xml.ws/share/classes/com/sun/xml/internal/ws/assembler/MetroConfigLoader.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,8 @@ import javax.xml.bind.Unmarshaller; import javax.xml.stream.XMLInputFactory; import javax.xml.ws.WebServiceException; +import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Method; import java.net.MalformedURLException; import java.net.URI; @@ -64,6 +66,7 @@ // TODO Move the logic of this class directly into MetroConfig class. class MetroConfigLoader { + private static final String JAXWS_TUBES_JDK_XML_RESOURCE = "jaxws-tubes-default.xml"; private static final Logger LOGGER = Logger.getLogger(MetroConfigLoader.class); private MetroConfigName defaultTubesConfigNames; @@ -122,11 +125,10 @@ defaultFileName = defaultTubesConfigNames.getDefaultFileName(); } this.defaultConfigUrl = locateResource(defaultFileName, loaders); - if (defaultConfigUrl == null) { - throw LOGGER.logSevereException(new IllegalStateException(TubelineassemblyMessages.MASM_0001_DEFAULT_CFG_FILE_NOT_FOUND(defaultFileName))); + if (defaultConfigUrl != null) { + LOGGER.config(TubelineassemblyMessages.MASM_0002_DEFAULT_CFG_FILE_LOCATED(defaultFileName, defaultConfigUrl)); } - LOGGER.config(TubelineassemblyMessages.MASM_0002_DEFAULT_CFG_FILE_LOCATED(defaultFileName, defaultConfigUrl)); this.defaultConfig = MetroConfigLoader.loadMetroConfig(defaultConfigUrl); if (defaultConfig == null) { throw LOGGER.logSevereException(new IllegalStateException(TubelineassemblyMessages.MASM_0003_DEFAULT_CFG_FILE_NOT_LOADED(defaultFileName))); @@ -235,17 +237,35 @@ } private static MetroConfig loadMetroConfig(@NotNull URL resourceUrl) { - MetroConfig result = null; - try { + try (InputStream is = getConfigInputStream(resourceUrl)) { JAXBContext jaxbContext = createJAXBContext(); Unmarshaller unmarshaller = jaxbContext.createUnmarshaller(); XMLInputFactory factory = XmlUtil.newXMLInputFactory(true); - final JAXBElement configElement = unmarshaller.unmarshal(factory.createXMLStreamReader(resourceUrl.openStream()), MetroConfig.class); - result = configElement.getValue(); + JAXBElement configElement = unmarshaller.unmarshal(factory.createXMLStreamReader(is), MetroConfig.class); + return configElement.getValue(); } catch (Exception e) { - LOGGER.warning(TubelineassemblyMessages.MASM_0010_ERROR_READING_CFG_FILE_FROM_LOCATION(resourceUrl.toString()), e); + String message = TubelineassemblyMessages.MASM_0010_ERROR_READING_CFG_FILE_FROM_LOCATION( + resourceUrl != null ? 
resourceUrl.toString() : null); + InternalError error = new InternalError(message); + LOGGER.logException(error, e, Level.SEVERE); + throw error; } - return result; + } + + private static InputStream getConfigInputStream(URL resourceUrl) throws IOException { + InputStream is; + if (resourceUrl != null) { + is = resourceUrl.openStream(); + } else { + is = MetroConfigLoader.class.getResourceAsStream(JAXWS_TUBES_JDK_XML_RESOURCE); + + if (is == null) + throw LOGGER.logSevereException( + new IllegalStateException( + TubelineassemblyMessages.MASM_0001_DEFAULT_CFG_FILE_NOT_FOUND(JAXWS_TUBES_JDK_XML_RESOURCE))); + } + + return is; } private static JAXBContext createJAXBContext() throws Exception { diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/jxc/ConfigReader.java --- a/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/jxc/ConfigReader.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/jxc/ConfigReader.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -181,7 +181,7 @@ /** * Lazily parsed schema for the binding file. */ - private static SchemaCache configSchema = new SchemaCache(Config.class.getResource("config.xsd")); + private static SchemaCache configSchema = new SchemaCache("config.xsd", Config.class); /** diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/SchemaCache.java --- a/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/SchemaCache.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/SchemaCache.java Wed Jul 05 20:37:12 2017 +0200 @@ -25,14 +25,23 @@ package com.sun.tools.internal.xjc; -import java.net.URL; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import javax.xml.transform.stream.StreamSource; import javax.xml.validation.Schema; import javax.xml.validation.SchemaFactory; import javax.xml.validation.ValidatorHandler; import com.sun.xml.internal.bind.v2.util.XmlFactory; import javax.xml.XMLConstants; + +import org.w3c.dom.ls.LSInput; +import org.w3c.dom.ls.LSResourceResolver; import org.xml.sax.SAXException; import static com.sun.xml.internal.bind.v2.util.XmlFactory.allowExternalAccess; @@ -47,30 +56,170 @@ */ public final class SchemaCache { + private final boolean createResolver; + private final String resourceName; + private final Class clazz; + private Schema schema; - private final URL source; + public SchemaCache(String resourceName, Class classToResolveResources) { + this(resourceName, classToResolveResources, false); + } - public SchemaCache(URL source) { - this.source = source; + public SchemaCache(String resourceName, Class classToResolveResources, boolean createResolver) { + this.resourceName = resourceName; + this.createResolver = createResolver; + this.clazz = classToResolveResources; } public ValidatorHandler newValidator() { - synchronized(this) { - if(schema==null) { - try { - // do not disable secure processing - these are well-known schemas - SchemaFactory sf = 
XmlFactory.createSchemaFactory(XMLConstants.W3C_XML_SCHEMA_NS_URI, false); - schema = allowExternalAccess(sf, "file", false).newSchema(source); - } catch (SAXException e) { - // we make sure that the schema is correct before we ship. - throw new AssertionError(e); + if (schema==null) { + synchronized (this) { + if (schema == null) { + + ResourceResolver resourceResolver = null; + try (InputStream is = clazz.getResourceAsStream(resourceName)) { + + StreamSource source = new StreamSource(is); + source.setSystemId(resourceName); + // do not disable secure processing - these are well-known schemas + + SchemaFactory sf = XmlFactory.createSchemaFactory(XMLConstants.W3C_XML_SCHEMA_NS_URI, false); + SchemaFactory schemaFactory = allowExternalAccess(sf, "file", false); + + if (createResolver) { + resourceResolver = new ResourceResolver(clazz); + schemaFactory.setResourceResolver(resourceResolver); + } + schema = schemaFactory.newSchema(source); + + } catch (IOException | SAXException e) { + throw new InternalError(e); + } finally { + if (resourceResolver != null) resourceResolver.closeStreams(); + } + } + } + } + return schema.newValidatorHandler(); + } + + class ResourceResolver implements LSResourceResolver { + + private List streamsToClose = Collections.synchronizedList(new ArrayList()); + private Class clazz; + + ResourceResolver(Class clazz) { + this.clazz = clazz; + } + + @Override + public LSInput resolveResource(String type, String namespaceURI, String publicId, String systemId, String baseURI) { + // XSOM passes the namespace URI to the publicID parameter. + // we do the same here . + InputStream is = clazz.getResourceAsStream(systemId); + streamsToClose.add(is); + return new Input(is, publicId, systemId); + } + + void closeStreams() { + for (InputStream is : streamsToClose) { + if (is != null) { + try { + is.close(); + } catch (IOException e) { + // nothing to do ... 
+ } } } } - - ValidatorHandler handler = schema.newValidatorHandler(); - return handler; } } + +class Input implements LSInput { + + private InputStream is; + private String publicId; + private String systemId; + + public Input(InputStream is, String publicId, String systemId) { + this.is = is; + this.publicId = publicId; + this.systemId = systemId; + } + + @Override + public Reader getCharacterStream() { + return null; + } + + @Override + public void setCharacterStream(Reader characterStream) { + } + + @Override + public InputStream getByteStream() { + return is; + } + + @Override + public void setByteStream(InputStream byteStream) { + } + + @Override + public String getStringData() { + return null; + } + + @Override + public void setStringData(String stringData) { + } + + @Override + public String getSystemId() { + return systemId; + } + + @Override + public void setSystemId(String systemId) { + } + + @Override + public String getPublicId() { + return publicId; + } + + @Override + public void setPublicId(String publicId) { + } + + @Override + public String getBaseURI() { + return null; + } + + @Override + public void setBaseURI(String baseURI) { + } + + @Override + public String getEncoding() { + return null; + } + + @Override + public void setEncoding(String encoding) { + } + + @Override + public boolean getCertifiedText() { + return false; + } + + @Override + public void setCertifiedText(boolean certifiedText) { + } +} + + diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/dtd/bindinfo/BindInfo.java --- a/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/dtd/bindinfo/BindInfo.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/dtd/bindinfo/BindInfo.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -289,7 +289,7 @@ /** * Lazily parsed schema for the binding file. */ - private static SchemaCache bindingFileSchema = new SchemaCache(BindInfo.class.getResource("bindingfile.xsd")); + private static SchemaCache bindingFileSchema = new SchemaCache("bindingfile.xsd", BindInfo.class); /** * Parses an InputSource into dom4j Document. diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/xmlschema/bindinfo/BindInfo.java --- a/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/xmlschema/bindinfo/BindInfo.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/src/jdk.xml.bind/share/classes/com/sun/tools/internal/xjc/reader/xmlschema/bindinfo/BindInfo.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -353,5 +353,5 @@ /** * Lazily parsed schema for the binding file. 
*/ - public static final SchemaCache bindingFileSchema = new SchemaCache(BindInfo.class.getResource("binding.xsd")); + public static SchemaCache bindingFileSchema = new SchemaCache("binding.xsd", BindInfo.class, true); } diff -r 1c0a1cee6054 -r 5b500c93ce48 jaxws/src/jdk.xml.bind/share/classes/com/sun/xml/internal/xsom/impl/parser/ParserContext.java --- a/jaxws/src/jdk.xml.bind/share/classes/com/sun/xml/internal/xsom/impl/parser/ParserContext.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jaxws/src/jdk.xml.bind/share/classes/com/sun/xml/internal/xsom/impl/parser/ParserContext.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,8 @@ import org.xml.sax.SAXException; import org.xml.sax.SAXParseException; +import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -81,20 +83,18 @@ this.owner = owner; this.parser = parser; - try { - parse(new InputSource(ParserContext.class.getResource("datatypes.xsd").toExternalForm())); + try (InputStream is = ParserContext.class.getResourceAsStream("datatypes.xsd")) { + InputSource source = new InputSource(is); + source.setSystemId("datatypes.xsd"); + parse(source); SchemaImpl xs = (SchemaImpl) - schemaSet.getSchema("http://www.w3.org/2001/XMLSchema"); + schemaSet.getSchema("http://www.w3.org/2001/XMLSchema"); xs.addSimpleType(schemaSet.anySimpleType,true); xs.addComplexType(schemaSet.anyType,true); - } catch( SAXException e ) { + } catch( SAXException | IOException e ) { // this must be a bug of XSOM - if(e.getException()!=null) - e.getException().printStackTrace(); - else - e.printStackTrace(); - throw new InternalError(); + throw new InternalError(e.getMessage()); } } diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/.hgtags --- a/jdk/.hgtags Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/.hgtags Wed Jul 05 20:37:12 2017 +0200 @@ -309,3 +309,4 @@ 7de8d036ad0980d988d1b9b4b4e6be555d9fbf98 jdk9-b64 ed94f3e7ba6bbfec0772de6d24e39543e13f6d88 jdk9-b65 4fbcca8ab812198c7fb747ea7b213b6e404f36e9 jdk9-b66 +1abd45df5480a04bff98fba1851d66a5230e67d4 jdk9-b67 diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/make/mapfiles/libjava/mapfile-vers --- a/jdk/make/mapfiles/libjava/mapfile-vers Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/make/mapfiles/libjava/mapfile-vers Wed Jul 05 20:37:12 2017 +0200 @@ -166,6 +166,16 @@ Java_java_lang_Package_getSystemPackage0; Java_java_lang_Package_getSystemPackages0; Java_java_lang_ProcessEnvironment_environ; + Java_java_lang_ProcessHandleImpl_getCurrentPid0; + Java_java_lang_ProcessHandleImpl_parent0; + Java_java_lang_ProcessHandleImpl_isAlive0; + Java_java_lang_ProcessHandleImpl_getProcessPids0; + Java_java_lang_ProcessHandleImpl_destroy0; + Java_java_lang_ProcessHandleImpl_waitForProcessExit0; + Java_java_lang_ProcessHandleImpl_00024Info_initIDs; + Java_java_lang_ProcessHandleImpl_00024Info_info0; + Java_java_lang_ProcessImpl_init; + Java_java_lang_ProcessImpl_forkAndExec; Java_java_lang_reflect_Array_get; Java_java_lang_reflect_Array_getBoolean; Java_java_lang_reflect_Array_getByte; @@ -214,10 +224,6 @@ Java_java_lang_Throwable_fillInStackTrace; Java_java_lang_Throwable_getStackTraceDepth; Java_java_lang_Throwable_getStackTraceElement; - Java_java_lang_ProcessImpl_init; - 
Java_java_lang_ProcessImpl_waitForProcessExit; - Java_java_lang_ProcessImpl_forkAndExec; - Java_java_lang_ProcessImpl_destroyProcess; Java_java_nio_Bits_copyFromShortArray; Java_java_nio_Bits_copyToShortArray; Java_java_nio_Bits_copyFromIntArray; @@ -277,7 +283,7 @@ Java_jdk_internal_jimage_concurrent_ConcurrentPReader_initIDs; Java_jdk_internal_jimage_concurrent_ConcurrentPReader_pread; - + # ZipFile.c needs this one throwFileNotFoundException; # zip_util.c needs this one diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/make/mapfiles/libnet/mapfile-vers --- a/jdk/make/mapfiles/libnet/mapfile-vers Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/make/mapfiles/libnet/mapfile-vers Wed Jul 05 20:37:12 2017 +0200 @@ -42,7 +42,7 @@ Java_java_net_Inet4Address_init; Java_java_net_Inet6Address_init; Java_java_net_PlainDatagramSocketImpl_setTTL; - Java_java_net_PlainDatagramSocketImpl_socketSetOption; + Java_java_net_PlainDatagramSocketImpl_socketSetOption0; Java_java_net_PlainDatagramSocketImpl_bind0; Java_java_net_PlainSocketImpl_socketAccept; Java_java_net_DatagramPacket_init; @@ -73,7 +73,7 @@ Java_java_net_SocketOutputStream_init; Java_java_net_PlainDatagramSocketImpl_peek; Java_java_net_PlainDatagramSocketImpl_peekData; - Java_java_net_PlainSocketImpl_socketSetOption; + Java_java_net_PlainSocketImpl_socketSetOption0; Java_java_net_PlainSocketImpl_socketSendUrgentData; Java_java_net_PlainDatagramSocketImpl_datagramSocketCreate; Java_java_net_PlainSocketImpl_socketGetOption; diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/macosx/native/libjava/ProcessHandleImpl_macosx.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/jdk/src/java.base/macosx/native/libjava/ProcessHandleImpl_macosx.c Wed Jul 05 20:37:12 2017 +0200 @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "jni.h" +#include "jni_util.h" +#include "java_lang_ProcessHandleImpl.h" +#include "java_lang_ProcessHandleImpl_Info.h" + +#include +#include +#include +#include +#include +#include + +#include + +/** + * Implementations of ProcessHandleImpl functions for MAC OS X; + * are NOT common to all Unix variants. + */ + +static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t pid); +static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid); + +/* + * Common Unix function to lookup the uid and return the user name. 
+ */ +extern jstring uidToUser(JNIEnv* env, uid_t uid); + +/* Field id for jString 'command' in java.lang.ProcessHandle.Info */ +static jfieldID ProcessHandleImpl_Info_commandID; + +/* Field id for jString[] 'arguments' in java.lang.ProcessHandle.Info */ +static jfieldID ProcessHandleImpl_Info_argumentsID; + +/* Field id for jlong 'totalTime' in java.lang.ProcessHandle.Info */ +static jfieldID ProcessHandleImpl_Info_totalTimeID; + +/* Field id for jlong 'startTime' in java.lang.ProcessHandle.Info */ +static jfieldID ProcessHandleImpl_Info_startTimeID; + +/* Field id for jString 'user' in java.lang.ProcessHandleImpl.Info */ +static jfieldID ProcessHandleImpl_Info_userID; + +/* static value for clock ticks per second. */ +static long clock_ticks_per_second; + +/************************************************************** + * Static method to initialize field IDs and the ticks per second rate. + * + * Class: java_lang_ProcessHandleImpl_Info + * Method: initIDs + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_java_lang_ProcessHandleImpl_00024Info_initIDs + (JNIEnv *env, jclass clazz) { + + CHECK_NULL(ProcessHandleImpl_Info_commandID = + (*env)->GetFieldID(env, clazz, "command", "Ljava/lang/String;")); + CHECK_NULL(ProcessHandleImpl_Info_argumentsID = + (*env)->GetFieldID(env, clazz, "arguments", "[Ljava/lang/String;")); + CHECK_NULL(ProcessHandleImpl_Info_totalTimeID = + (*env)->GetFieldID(env, clazz, "totalTime", "J")); + CHECK_NULL(ProcessHandleImpl_Info_startTimeID = + (*env)->GetFieldID(env, clazz, "startTime", "J")); + CHECK_NULL(ProcessHandleImpl_Info_userID = + (*env)->GetFieldID(env, clazz, "user", "Ljava/lang/String;")); + clock_ticks_per_second = sysconf(_SC_CLK_TCK); +} + +/* + * Returns the parent pid of the requested pid. + * + * Class: java_lang_ProcessHandleImpl + * Method: parent0 + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL Java_java_lang_ProcessHandleImpl_parent0 +(JNIEnv *env, jobject obj, jlong jpid) { + pid_t pid = (pid_t) jpid; + pid_t ppid = -1; + + if (pid == getpid()) { + ppid = getppid(); + } else { + const pid_t pid = (pid_t) jpid; + struct kinfo_proc kp; + size_t bufSize = sizeof kp; + + // Read the process info for the specific pid + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid}; + if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) { + JNU_ThrowByNameWithLastError(env, + "java/lang/RuntimeException", "sysctl failed"); + return -1; + } + ppid = (bufSize > 0 && kp.kp_proc.p_pid == pid) ? kp.kp_eproc.e_ppid : -1; + } + return (jlong) ppid; +} + +/* + * Returns the children of the requested pid and optionally each parent. + * + * Class: java_lang_ProcessHandleImpl + * Method: getProcessPids0 + * Signature: (J[J[J)I + * + * Use sysctl to accumulate any process whose parent pid is zero or matches. + * The resulting pids are stored into the array of longs. + * The number of pids is returned if they all fit. + * If the parentArray is non-null, store the parent pid. + * If the array is too short, excess pids are not stored and + * the desired length is returned. 
+ */ +JNIEXPORT jint JNICALL Java_java_lang_ProcessHandleImpl_getProcessPids0 +(JNIEnv *env, jclass clazz, jlong jpid, + jlongArray jarray, jlongArray jparentArray) +{ + size_t count = 0; + jlong* pids = NULL; + jlong* ppids = NULL; + size_t parentArraySize = 0; + size_t arraySize = 0; + size_t bufSize = 0; + pid_t pid = (pid_t) jpid; + + arraySize = (*env)->GetArrayLength(env, jarray); + JNU_CHECK_EXCEPTION_RETURN(env, -1); + if (jparentArray != NULL) { + parentArraySize = (*env)->GetArrayLength(env, jparentArray); + JNU_CHECK_EXCEPTION_RETURN(env, -1); + + if (arraySize != parentArraySize) { + JNU_ThrowIllegalArgumentException(env, "array sizes not equal"); + return 0; + } + } + + // Get buffer size needed to read all processes + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0}; + if (sysctl(mib, 4, NULL, &bufSize, NULL, 0) < 0) { + JNU_ThrowByNameWithLastError(env, + "java/lang/RuntimeException", "sysctl failed"); + return -1; + } + + // Allocate buffer big enough for all processes + void *buffer = malloc(bufSize); + if (buffer == NULL) { + JNU_ThrowOutOfMemoryError(env, "malloc failed"); + return -1; + } + + // Read process info for all processes + if (sysctl(mib, 4, buffer, &bufSize, NULL, 0) < 0) { + JNU_ThrowByNameWithLastError(env, + "java/lang/RuntimeException", "sysctl failed"); + free(buffer); + return -1; + } + + do { // Block to break out of on Exception + struct kinfo_proc *kp = (struct kinfo_proc *) buffer; + unsigned long nentries = bufSize / sizeof (struct kinfo_proc); + long i; + + pids = (*env)->GetLongArrayElements(env, jarray, NULL); + if (pids == NULL) { + break; + } + if (jparentArray != NULL) { + ppids = (*env)->GetLongArrayElements(env, jparentArray, NULL); + if (ppids == NULL) { + break; + } + } + + // Process each entry in the buffer + for (i = nentries; --i >= 0; ++kp) { + if (pid == 0 || kp->kp_eproc.e_ppid == pid) { + if (count < arraySize) { + // Only store if it fits + pids[count] = (jlong) kp->kp_proc.p_pid; + if (ppids != NULL) { + // Store the parentPid + ppids[count] = (jlong) kp->kp_eproc.e_ppid; + } + } + count++; // Count to tabulate size needed + } + } + } while (0); + + if (pids != NULL) { + (*env)->ReleaseLongArrayElements(env, jarray, pids, 0); + } + if (ppids != NULL) { + (*env)->ReleaseLongArrayElements(env, jparentArray, ppids, 0); + } + + free(buffer); + // If more pids than array had size for; count will be greater than array size + return count; +} + +/************************************************************** + * Implementation of ProcessHandleImpl_Info native methods. + */ + +/* + * Fill in the Info object from the OS information about the process. + * + * Class: java_lang_ProcessHandleImpl + * Method: info0 + * Signature: (J)I + */ +JNIEXPORT void JNICALL Java_java_lang_ProcessHandleImpl_00024Info_info0 + (JNIEnv *env, jobject jinfo, jlong jpid) { + pid_t pid = (pid_t) jpid; + getStatInfo(env, jinfo, pid); + getCmdlineInfo(env, jinfo, pid); +} + +/** + * Read /proc//stat and fill in the fields of the Info object. + * The executable name, plus the user, system, and start times are gathered. 
+ */ +static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t jpid) { + jlong totalTime; // nanoseconds + unsigned long long startTime; // microseconds + + const pid_t pid = (pid_t) jpid; + struct kinfo_proc kp; + size_t bufSize = sizeof kp; + + // Read the process info for the specific pid + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid}; + + if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) { + if (errno == EINVAL) { + return; + } else { + JNU_ThrowByNameWithLastError(env, + "java/lang/RuntimeException", "sysctl failed"); + } + return; + } + + // Convert the UID to the username + jstring name = NULL; + CHECK_NULL((name = uidToUser(env, kp.kp_eproc.e_ucred.cr_uid))); + (*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_userID, name); + JNU_CHECK_EXCEPTION(env); + + startTime = kp.kp_proc.p_starttime.tv_sec * 1000 + + kp.kp_proc.p_starttime.tv_usec / 1000; + + (*env)->SetLongField(env, jinfo, ProcessHandleImpl_Info_startTimeID, startTime); + JNU_CHECK_EXCEPTION(env); + + // Get cputime if for current process + if (pid == getpid()) { + struct rusage usage; + if (getrusage(RUSAGE_SELF, &usage) != 0) { + return; + } + jlong microsecs = + usage.ru_utime.tv_sec * 1000 * 1000 + usage.ru_utime.tv_usec + + usage.ru_stime.tv_sec * 1000 * 1000 + usage.ru_stime.tv_usec; + totalTime = microsecs * 1000; + (*env)->SetLongField(env, jinfo, ProcessHandleImpl_Info_totalTimeID, totalTime); + JNU_CHECK_EXCEPTION(env); + } +} + +/** + * Construct the argument array by parsing the arguments from the sequence of arguments. + */ +static int fillArgArray(JNIEnv *env, jobject jinfo, int nargs, + const char *cp, const char *argsEnd) { + jstring str = NULL; + jobject argsArray; + int i; + + if (nargs < 1) { + return 0; + } + // Create a String array for nargs-1 elements + CHECK_NULL_RETURN((argsArray = (*env)->NewObjectArray(env, + nargs - 1, JNU_ClassString(env), NULL)), -1); + + for (i = 0; i < nargs - 1; i++) { + // skip to the next argument; omits arg[0] + cp += strnlen(cp, (argsEnd - cp)) + 1; + + if (cp > argsEnd || *cp == '\0') { + return -2; // Off the end pointer or an empty argument is an error + } + + CHECK_NULL_RETURN((str = JNU_NewStringPlatform(env, cp)), -1); + + (*env)->SetObjectArrayElement(env, argsArray, i, str); + JNU_CHECK_EXCEPTION_RETURN(env, -3); + } + (*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_argumentsID, argsArray); + JNU_CHECK_EXCEPTION_RETURN(env, -4); + return 0; +} + +/** + * Retrieve the command and arguments for the process and store them + * into the Info object. 
+ */ +static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) { + int mib[3], maxargs, nargs, i; + size_t size; + char *args, *cp, *sp, *np; + + // Get the maximum size of the arguments + mib[0] = CTL_KERN; + mib[1] = KERN_ARGMAX; + size = sizeof(maxargs); + if (sysctl(mib, 2, &maxargs, &size, NULL, 0) == -1) { + JNU_ThrowByNameWithLastError(env, + "java/lang/RuntimeException", "sysctl failed"); + return; + } + + // Allocate an args buffer and get the arguments + args = (char *)malloc(maxargs); + if (args == NULL) { + JNU_ThrowOutOfMemoryError(env, "malloc failed"); + return; + } + + do { // a block to break out of on error + char *argsEnd; + jstring str = NULL; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROCARGS2; + mib[2] = pid; + size = (size_t) maxargs; + if (sysctl(mib, 3, args, &size, NULL, 0) == -1) { + if (errno != EINVAL) { + JNU_ThrowByNameWithLastError(env, + "java/lang/RuntimeException", "sysctl failed"); + } + break; + } + memcpy(&nargs, args, sizeof(nargs)); + + cp = &args[sizeof(nargs)]; // Strings start after nargs + argsEnd = &args[size]; + + // Store the command executable path + if ((str = JNU_NewStringPlatform(env, cp)) == NULL) { + break; + } + (*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_commandID, str); + if ((*env)->ExceptionCheck(env)) { + break; + } + + // Skip trailing nulls after the executable path + for (cp = cp + strnlen(cp, argsEnd - cp); cp < argsEnd; cp++) { + if (*cp != '\0') { + break; + } + } + + fillArgArray(env, jinfo, nargs, cp, argsEnd); + } while (0); + // Free the arg buffer + free(args); +} + diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/io/InputStream.java --- a/jdk/src/java.base/share/classes/java/io/InputStream.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/io/InputStream.java Wed Jul 05 20:37:12 2017 +0200 @@ -25,6 +25,7 @@ package java.io; +import java.util.Arrays; import java.util.Objects; /** @@ -50,7 +51,7 @@ // use when skipping. private static final int MAX_SKIP_BUFFER_SIZE = 2048; - private static final int TRANSFER_BUFFER_SIZE = 8192; + private static final int DEFAULT_BUFFER_SIZE = 8192; /** * Reads the next byte of data from the input stream. The value byte is @@ -192,6 +193,128 @@ } /** + * The maximum size of array to allocate. + * Some VMs reserve some header words in an array. + * Attempts to allocate larger arrays may result in + * OutOfMemoryError: Requested array size exceeds VM limit + */ + private static final int MAX_BUFFER_SIZE = Integer.MAX_VALUE - 8; + + /** + * Reads all remaining bytes from the input stream. This method blocks until + * all remaining bytes have been read and end of stream is detected, or an + * exception is thrown. This method does not close the input stream. + * + *
When this stream reaches end of stream, further invocations of this + * method will return an empty byte array. + * + *
Note that this method is intended for simple cases where it is + * convenient to read all bytes into a byte array. It is not intended for + * reading input streams with large amounts of data. + * + *
The behavior for the case where the input stream is asynchronously + * closed, or the thread interrupted during the read, is highly input + * stream specific, and therefore not specified. + * + *
If an I/O error occurs reading from the input stream, then it may do + * so after some, but not all, bytes have been read. Consequently the input + * stream may not be at end of stream and may be in an inconsistent state. + * It is strongly recommended that the stream be promptly closed if an I/O + * error occurs. + * + * @return a byte array containing the bytes read from this input stream + * @throws IOException if an I/O error occurs + * @throws OutOfMemoryError if an array of the required size cannot be + * allocated. For example, if an array larger than {@code 2GB} would + * be required to store the bytes. + * + * @since 1.9 + */ + public byte[] readAllBytes() throws IOException { + byte[] buf = new byte[DEFAULT_BUFFER_SIZE]; + int capacity = buf.length; + int nread = 0; + int n; + for (;;) { + // read to EOF which may read more or less than initial buffer size + while ((n = read(buf, nread, capacity - nread)) > 0) + nread += n; + + // if the last call to read returned -1, then we're done + if (n < 0) + break; + + // need to allocate a larger buffer + if (capacity <= MAX_BUFFER_SIZE - capacity) { + capacity = capacity << 1; + } else { + if (capacity == MAX_BUFFER_SIZE) + throw new OutOfMemoryError("Required array size too large"); + capacity = MAX_BUFFER_SIZE; + } + buf = Arrays.copyOf(buf, capacity); + } + return (capacity == nread) ? buf : Arrays.copyOf(buf, nread); + } + + /** + * Reads the requested number of bytes from the input stream into the given + * byte array. This method blocks until {@code len} bytes of input data have + * been read, end of stream is detected, or an exception is thrown. The + * number of bytes actually read, possibly zero, is returned. This method + * does not close the input stream. + * + *

In the case where end of stream is reached before {@code len} bytes + * have been read, then the actual number of bytes read will be returned. + * When this stream reaches end of stream, further invocations of this + * method will return zero. + * + *

If {@code len} is zero, then no bytes are read and {@code 0} is + * returned; otherwise, there is an attempt to read up to {@code len} bytes. + * + *

The first byte read is stored into element {@code b[off]}, the next + * one in to {@code b[off+1]}, and so on. The number of bytes read is, at + * most, equal to {@code len}. Let k be the number of bytes actually + * read; these bytes will be stored in elements {@code b[off]} through + * {@code b[off+}k{@code -1]}, leaving elements {@code b[off+}k + * {@code ]} through {@code b[off+len-1]} unaffected. + * + *
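A small sketch of the contract described above, assuming a runtime that already includes this method; the stream contents here are made up:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class ReadNBytesExample {
        public static void main(String[] args) throws IOException {
            InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3});
            byte[] header = new byte[8];
            // Asks for up to 8 bytes starting at offset 0; returns 3 here because
            // end of stream is reached before the requested length is read.
            int n = in.readNBytes(header, 0, header.length);
            System.out.println("read " + n + " bytes");
        }
    }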

The behavior for the case where the input stream is asynchronously + * closed, or the thread interrupted during the read, is highly input + * stream specific, and therefore not specified. + * + *

If an I/O error occurs reading from the input stream, then it may do + * so after some, but not all, bytes of {@code b} have been updated with + * data from the input stream. Consequently the input stream and {@code b} + * may be in an inconsistent state. It is strongly recommended that the + * stream be promptly closed if an I/O error occurs. + * + * @param b the byte array into which the data is read + * @param off the start offset in {@code b} at which the data is written + * @param len the maximum number of bytes to read + * @return the actual number of bytes read into the buffer + * @throws IOException if an I/O error occurs + * @throws NullPointerException if {@code b} is {@code null} + * @throws IndexOutOfBoundsException If {@code off} is negative, {@code len} + * is negative, or {@code len} is greater than {@code b.length - off} + * + * @since 1.9 + */ + public int readNBytes(byte[] b, int off, int len) throws IOException { + Objects.requireNonNull(b); + if (off < 0 || len < 0 || len > b.length - off) + throw new IndexOutOfBoundsException(); + int n = 0; + while (n < len) { + int count = read(b, off + n, len - n); + if (count < 0) + break; + n += count; + } + return n; + } + + /** * Skips over and discards n bytes of data from this input * stream. The skip method may, for a variety of reasons, end * up skipping over some smaller number of bytes, possibly 0. @@ -396,9 +519,9 @@ public long transferTo(OutputStream out) throws IOException { Objects.requireNonNull(out, "out"); long transferred = 0; - byte[] buffer = new byte[TRANSFER_BUFFER_SIZE]; + byte[] buffer = new byte[DEFAULT_BUFFER_SIZE]; int read; - while ((read = this.read(buffer, 0, TRANSFER_BUFFER_SIZE)) >= 0) { + while ((read = this.read(buffer, 0, DEFAULT_BUFFER_SIZE)) >= 0) { out.write(buffer, 0, read); transferred += read; } diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/io/ObjectInputStream.java --- a/jdk/src/java.base/share/classes/java/io/ObjectInputStream.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/io/ObjectInputStream.java Wed Jul 05 20:37:12 2017 +0200 @@ -253,9 +253,6 @@ /** flag set when at end of field value block with no TC_ENDBLOCKDATA */ private boolean defaultDataEnd = false; - /** buffer for reading primitive field values */ - private byte[] primVals; - /** if true, invoke readObjectOverride() instead of readObject() */ private final boolean enableOverride; /** if true, invoke resolveObject() */ @@ -500,7 +497,11 @@ Object curObj = ctx.getObj(); ObjectStreamClass curDesc = ctx.getDesc(); bin.setBlockDataMode(false); - defaultReadFields(curObj, curDesc); + FieldValues vals = defaultReadFields(curObj, curDesc); + if (curObj != null) { + defaultCheckFieldValues(curObj, curDesc, vals); + defaultSetFieldValues(curObj, curDesc, vals); + } bin.setBlockDataMode(true); if (!curDesc.hasWriteObjectData()) { /* @@ -1881,6 +1882,26 @@ throws IOException { ObjectStreamClass.ClassDataSlot[] slots = desc.getClassDataLayout(); + // Best effort Failure Atomicity; slotValues will be non-null if field + // values can be set after reading all field data in the hierarchy. + // Field values can only be set after reading all data if there are no + // user observable methods in the hierarchy, readObject(NoData). The + // top most Serializable class in the hierarchy can be skipped. 
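The distinction drawn in this comment can be illustrated with a short sketch (class names are hypothetical): a plain hierarchy lets field assignment be deferred until all data has been read and type-checked, while a readObject method below the topmost serializable class forces values to be assigned class by class as they are read.

    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.Serializable;

    // No readObject/readObjectNoData anywhere: field values can be read,
    // checked, and only then assigned (the deferred, failure-atomic path).
    class Point implements Serializable {
        int x, y;
    }

    // Deserializing a LoggedPoint involves a readObject method below the
    // topmost serializable class, so deferral is disabled for that hierarchy.
    class LoggedPoint extends Point {
        private void readObject(ObjectInputStream in)
                throws IOException, ClassNotFoundException {
            in.defaultReadObject();
            System.out.println("restored " + x + "," + y);
        }
    }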
+ FieldValues[] slotValues = null; + + boolean hasSpecialReadMethod = false; + for (int i = 1; i < slots.length; i++) { + ObjectStreamClass slotDesc = slots[i].desc; + if (slotDesc.hasReadObjectMethod() + || slotDesc.hasReadObjectNoDataMethod()) { + hasSpecialReadMethod = true; + break; + } + } + // No special read methods, can store values and defer setting. + if (!hasSpecialReadMethod) + slotValues = new FieldValues[slots.length]; + for (int i = 0; i < slots.length; i++) { ObjectStreamClass slotDesc = slots[i].desc; @@ -1917,7 +1938,13 @@ */ defaultDataEnd = false; } else { - defaultReadFields(obj, slotDesc); + FieldValues vals = defaultReadFields(obj, slotDesc); + if (slotValues != null) { + slotValues[i] = vals; + } else if (obj != null) { + defaultCheckFieldValues(obj, slotDesc, vals); + defaultSetFieldValues(obj, slotDesc, vals); + } } if (slotDesc.hasWriteObjectData()) { skipCustomData(); @@ -1933,6 +1960,19 @@ } } } + + if (obj != null && slotValues != null) { + // Check that the non-primitive types are assignable for all slots + // before assigning. + for (int i = 0; i < slots.length; i++) { + if (slotValues[i] != null) + defaultCheckFieldValues(obj, slots[i].desc, slotValues[i]); + } + for (int i = 0; i < slots.length; i++) { + if (slotValues[i] != null) + defaultSetFieldValues(obj, slots[i].desc, slotValues[i]); + } + } } /** @@ -1964,12 +2004,22 @@ } } + private class FieldValues { + final byte[] primValues; + final Object[] objValues; + + FieldValues(byte[] primValues, Object[] objValues) { + this.primValues = primValues; + this.objValues = objValues; + } + } + /** * Reads in values of serializable fields declared by given class - * descriptor. If obj is non-null, sets field values in obj. Expects that - * passHandle is set to obj's handle before this method is called. + * descriptor. Expects that passHandle is set to obj's handle before this + * method is called. */ - private void defaultReadFields(Object obj, ObjectStreamClass desc) + private FieldValues defaultReadFields(Object obj, ObjectStreamClass desc) throws IOException { Class cl = desc.forClass(); @@ -1977,22 +2027,19 @@ throw new ClassCastException(); } + byte[] primVals = null; int primDataSize = desc.getPrimDataSize(); if (primDataSize > 0) { - if (primVals == null || primVals.length < primDataSize) { - primVals = new byte[primDataSize]; - } + primVals = new byte[primDataSize]; bin.readFully(primVals, 0, primDataSize, false); - if (obj != null) { - desc.setPrimFieldValues(obj, primVals); - } } + Object[] objVals = null; int numObjFields = desc.getNumObjFields(); if (numObjFields > 0) { int objHandle = passHandle; ObjectStreamField[] fields = desc.getFields(false); - Object[] objVals = new Object[numObjFields]; + objVals = new Object[numObjFields]; int numPrimFields = fields.length - objVals.length; for (int i = 0; i < objVals.length; i++) { ObjectStreamField f = fields[numPrimFields + i]; @@ -2001,11 +2048,30 @@ handles.markDependency(objHandle, passHandle); } } - if (obj != null) { - desc.setObjFieldValues(obj, objVals); - } passHandle = objHandle; } + + return new FieldValues(primVals, objVals); + } + + /** Throws ClassCastException if any value is not assignable. */ + private void defaultCheckFieldValues(Object obj, ObjectStreamClass desc, + FieldValues values) { + Object[] objectValues = values.objValues; + if (objectValues != null) + desc.checkObjFieldValueTypes(obj, objectValues); + } + + /** Sets field values in obj. 
*/ + private void defaultSetFieldValues(Object obj, ObjectStreamClass desc, + FieldValues values) { + byte[] primValues = values.primValues; + Object[] objectValues = values.objValues; + + if (primValues != null) + desc.setPrimFieldValues(obj, primValues); + if (objectValues != null) + desc.setObjFieldValues(obj, objectValues); } /** diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/io/ObjectStreamClass.java --- a/jdk/src/java.base/share/classes/java/io/ObjectStreamClass.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/io/ObjectStreamClass.java Wed Jul 05 20:37:12 2017 +0200 @@ -1253,6 +1253,15 @@ } /** + * Checks that the given values, from array vals starting at offset 0, + * are assignable to the given serializable object fields. + * @throws ClassCastException if any value is not assignable + */ + void checkObjFieldValueTypes(Object obj, Object[] vals) { + fieldRefl.checkObjectFieldValueTypes(obj, vals); + } + + /** * Sets the serializable object fields of object obj using values from * array vals starting at offset 0. It is the responsibility of the caller * to ensure that obj is of the proper type if non-null. @@ -2070,6 +2079,15 @@ } /** + * Checks that the given values, from array vals starting at offset 0, + * are assignable to the given serializable object fields. + * @throws ClassCastException if any value is not assignable + */ + void checkObjectFieldValueTypes(Object obj, Object[] vals) { + setObjFieldValues(obj, vals, true); + } + + /** * Sets the serializable object fields of object obj using values from * array vals starting at offset 0. The caller is responsible for * ensuring that obj is of the proper type; however, attempts to set a @@ -2077,6 +2095,10 @@ * ClassCastException. */ void setObjFieldValues(Object obj, Object[] vals) { + setObjFieldValues(obj, vals, false); + } + + private void setObjFieldValues(Object obj, Object[] vals, boolean dryRun) { if (obj == null) { throw new NullPointerException(); } @@ -2101,7 +2123,8 @@ f.getType().getName() + " in instance of " + obj.getClass().getName()); } - unsafe.putObject(obj, key, val); + if (!dryRun) + unsafe.putObject(obj, key, val); break; default: diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/lang/Character.java --- a/jdk/src/java.base/share/classes/java/lang/Character.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/lang/Character.java Wed Jul 05 20:37:12 2017 +0200 @@ -646,13 +646,11 @@ */ public static final class UnicodeBlock extends Subset { /** - * 510 - the expected number of enteties + * 510 - the expected number of entities * 0.75 - the default load factor of HashMap */ - private static final int INITIAL_CAPACITY = - (int)(510 / 0.75f + 1.0f); private static Map map = - new HashMap<>(INITIAL_CAPACITY); + new HashMap<>((int)(510 / 0.75f + 1.0f)); /** * Creates a UnicodeBlock with the given identifier name. diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/lang/Process.java --- a/jdk/src/java.base/share/classes/java/lang/Process.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/lang/Process.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,25 +26,31 @@ package java.lang; import java.io.*; +import java.lang.ProcessBuilder.Redirect; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ForkJoinPool; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; /** + * {@code Process} provides control of native processes started by + * ProcessBuilder.start and Runtime.exec. + * The class provides methods for performing input from the process, performing + * output to the process, waiting for the process to complete, + * checking the exit status of the process, and destroying (killing) + * the process. * The {@link ProcessBuilder#start()} and * {@link Runtime#exec(String[],String[],File) Runtime.exec} * methods create a native process and return an instance of a * subclass of {@code Process} that can be used to control the process - * and obtain information about it. The class {@code Process} - * provides methods for performing input from the process, performing - * output to the process, waiting for the process to complete, - * checking the exit status of the process, and destroying (killing) - * the process. + * and obtain information about it. * *

The methods that create processes may not work well for special * processes on certain native platforms, such as native windowing * processes, daemon processes, Win16/DOS processes on Microsoft * Windows, or shell scripts. * - *

By default, the created subprocess does not have its own terminal + *

By default, the created process does not have its own terminal * or console. All its standard I/O (i.e. stdin, stdout, stderr) * operations will be redirected to the parent process, where they can * be accessed via the streams obtained using the methods @@ -52,35 +58,49 @@ * {@link #getInputStream()}, and * {@link #getErrorStream()}. * The parent process uses these streams to feed input to and get output - * from the subprocess. Because some native platforms only provide + * from the process. Because some native platforms only provide * limited buffer size for standard input and output streams, failure * to promptly write the input stream or read the output stream of - * the subprocess may cause the subprocess to block, or even deadlock. + * the process may cause the process to block, or even deadlock. * *

Where desired, - * subprocess I/O can also be redirected + * process I/O can also be redirected * using methods of the {@link ProcessBuilder} class. * - *
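A sketch of both points (the command shown is hypothetical and platform specific): either read the output promptly so the pipe buffer cannot fill, or let ProcessBuilder redirect the streams so the parent never has to drain them.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public class RunAndDrain {
        public static void main(String[] args) throws IOException, InterruptedException {
            // Merge stderr into stdout and consume it promptly so the child
            // cannot block on a full pipe buffer.
            Process p = new ProcessBuilder("ls", "-l").redirectErrorStream(true).start();
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(p.getInputStream()))) {
                r.lines().forEach(System.out::println);
            }
            System.out.println("exit: " + p.waitFor());

            // Alternatively, redirect I/O entirely and never touch the streams.
            new ProcessBuilder("ls", "-l").inheritIO().start().waitFor();
        }
    }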

The subprocess is not killed when there are no more references to - * the {@code Process} object, but rather the subprocess + *

The process is not killed when there are no more references to + * the {@code Process} object, but rather the process * continues executing asynchronously. * - *

There is no requirement that a process represented by a {@code + *

There is no requirement that the process represented by a {@code * Process} object execute asynchronously or concurrently with respect * to the Java process that owns the {@code Process} object. * *

As of 1.5, {@link ProcessBuilder#start()} is the preferred way * to create a {@code Process}. * + *

Subclasses of Process should override the {@link #onExit()} and + * {@link #toHandle()} methods to provide a fully functional Process including the + * {@link #getPid() process id}, + * {@link #info() information about the process}, + * {@link #children() direct children}, and + * {@link #allChildren() direct and indirect children} of the process. + * Delegating to the underlying Process or ProcessHandle is typically + * easiest and most efficient. + * * @since 1.0 */ public abstract class Process { /** + * Default constructor for Process. + */ + public Process() {} + + /** * Returns the output stream connected to the normal input of the - * subprocess. Output to the stream is piped into the standard + * process. Output to the stream is piped into the standard * input of the process represented by this {@code Process} object. * - *

If the standard input of the subprocess has been redirected using + *

If the standard input of the process has been redirected using * {@link ProcessBuilder#redirectInput(Redirect) * ProcessBuilder.redirectInput} * then this method will return a @@ -90,42 +110,42 @@ * output stream to be buffered. * * @return the output stream connected to the normal input of the - * subprocess + * process */ public abstract OutputStream getOutputStream(); /** * Returns the input stream connected to the normal output of the - * subprocess. The stream obtains data piped from the standard + * process. The stream obtains data piped from the standard * output of the process represented by this {@code Process} object. * - *

If the standard output of the subprocess has been redirected using + *

If the standard output of the process has been redirected using * {@link ProcessBuilder#redirectOutput(Redirect) * ProcessBuilder.redirectOutput} * then this method will return a * null input stream. * - *

Otherwise, if the standard error of the subprocess has been + *

Otherwise, if the standard error of the process has been * redirected using * {@link ProcessBuilder#redirectErrorStream(boolean) * ProcessBuilder.redirectErrorStream} * then the input stream returned by this method will receive the - * merged standard output and the standard error of the subprocess. + * merged standard output and the standard error of the process. * *

Implementation note: It is a good idea for the returned * input stream to be buffered. * * @return the input stream connected to the normal output of the - * subprocess + * process */ public abstract InputStream getInputStream(); /** * Returns the input stream connected to the error output of the - * subprocess. The stream obtains data piped from the error output + * process. The stream obtains data piped from the error output * of the process represented by this {@code Process} object. * - *

If the standard error of the subprocess has been redirected using + *

If the standard error of the process has been redirected using * {@link ProcessBuilder#redirectError(Redirect) * ProcessBuilder.redirectError} or * {@link ProcessBuilder#redirectErrorStream(boolean) @@ -137,19 +157,19 @@ * input stream to be buffered. * * @return the input stream connected to the error output of - * the subprocess + * the process */ public abstract InputStream getErrorStream(); /** * Causes the current thread to wait, if necessary, until the * process represented by this {@code Process} object has - * terminated. This method returns immediately if the subprocess - * has already terminated. If the subprocess has not yet + * terminated. This method returns immediately if the process + * has already terminated. If the process has not yet * terminated, the calling thread will be blocked until the - * subprocess exits. + * process exits. * - * @return the exit value of the subprocess represented by this + * @return the exit value of the process represented by this * {@code Process} object. By convention, the value * {@code 0} indicates normal termination. * @throws InterruptedException if the current thread is @@ -161,10 +181,10 @@ /** * Causes the current thread to wait, if necessary, until the - * subprocess represented by this {@code Process} object has + * process represented by this {@code Process} object has * terminated, or the specified waiting time elapses. * - *

If the subprocess has already terminated then this method returns + *

If the process has already terminated then this method returns * immediately with the value {@code true}. If the process has not * terminated and the timeout value is less than, or equal to, zero, then * this method returns immediately with the value {@code false}. @@ -176,8 +196,8 @@ * * @param timeout the maximum time to wait * @param unit the time unit of the {@code timeout} argument - * @return {@code true} if the subprocess has exited and {@code false} if - * the waiting time elapsed before the subprocess has exited. + * @return {@code true} if the process has exited and {@code false} if + * the waiting time elapsed before the process has exited. * @throws InterruptedException if the current thread is interrupted * while waiting. * @throws NullPointerException if unit is null @@ -204,41 +224,60 @@ } /** - * Returns the exit value for the subprocess. + * Returns the exit value for the process. * - * @return the exit value of the subprocess represented by this + * @return the exit value of the process represented by this * {@code Process} object. By convention, the value * {@code 0} indicates normal termination. - * @throws IllegalThreadStateException if the subprocess represented + * @throws IllegalThreadStateException if the process represented * by this {@code Process} object has not yet terminated */ public abstract int exitValue(); /** - * Kills the subprocess. Whether the subprocess represented by this - * {@code Process} object is forcibly terminated or not is + * Kills the process. + * Whether the process represented by this {@code Process} object is + * {@link #supportsNormalTermination normally terminated} or not is * implementation dependent. + * Forcible process destruction is defined as the immediate termination of a + * process, whereas normal termination allows the process to shut down cleanly. + * If the process is not alive, no action is taken. + *

+ * The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is + * {@link java.util.concurrent.CompletableFuture#complete completed} + * when the process has terminated. */ public abstract void destroy(); /** - * Kills the subprocess. The subprocess represented by this + * Kills the process forcibly. The process represented by this * {@code Process} object is forcibly terminated. + * Forcible process destruction is defined as the immediate termination of a + * process, whereas normal termination allows the process to shut down cleanly. + * If the process is not alive, no action is taken. + *

+ * The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is + * {@link java.util.concurrent.CompletableFuture#complete completed} + * when the process has terminated. + *

+ * Invoking this method on {@code Process} objects returned by + * {@link ProcessBuilder#start} and {@link Runtime#exec} forcibly terminate + * the process. * - *

The default implementation of this method invokes {@link #destroy} - * and so may not forcibly terminate the process. Concrete implementations - * of this class are strongly encouraged to override this method with a - * compliant implementation. Invoking this method on {@code Process} - * objects returned by {@link ProcessBuilder#start} and - * {@link Runtime#exec} will forcibly terminate the process. - * - *

Note: The subprocess may not terminate immediately. + * @implSpec + * The default implementation of this method invokes {@link #destroy} + * and so may not forcibly terminate the process. + * @implNote + * Concrete implementations of this class are strongly encouraged to override + * this method with a compliant implementation. + * @apiNote + * The process may not terminate immediately. * i.e. {@code isAlive()} may return true for a brief period * after {@code destroyForcibly()} is called. This method * may be chained to {@code waitFor()} if needed. * * @return the {@code Process} object representing the - * subprocess to be forcibly destroyed. + * process forcibly destroyed * @since 1.8 */ public Process destroyForcibly() { @@ -247,10 +286,36 @@ } /** - * Tests whether the subprocess represented by this {@code Process} is + * Returns {@code true} if the implementation of {@link #destroy} is to + * normally terminate the process, + * Returns {@code false} if the implementation of {@code destroy} + * forcibly and immediately terminates the process. + *

+ * Invoking this method on {@code Process} objects returned by + * {@link ProcessBuilder#start} and {@link Runtime#exec} return + * {@code true} or {@code false} depending on the platform implementation. + * + * @implSpec + * This implementation throws an instance of + * {@link java.lang.UnsupportedOperationException} and performs no other action. + * + * @return {@code true} if the implementation of {@link #destroy} is to + * normally terminate the process; + * otherwise, {@link #destroy} forcibly terminates the process + * @throws UnsupportedOperationException if the Process implementation + * does not support this operation + * @since 1.9 + */ + public boolean supportsNormalTermination() { + throw new UnsupportedOperationException(this.getClass() + + ".supportsNormalTermination() not supported" ); + } + + /** + * Tests whether the process represented by this {@code Process} is * alive. * - * @return {@code true} if the subprocess represented by this + * @return {@code true} if the process represented by this * {@code Process} object has not yet terminated. * @since 1.8 */ @@ -264,16 +329,222 @@ } /** - * Returns the native process id of the subprocess. - * The native process id is an identification number that the operating + * Returns the native process ID of the process. + * The native process ID is an identification number that the operating * system assigns to the process. * - * @return the native process id of the subprocess + * @implSpec + * The implementation of this method returns the process id as: + * {@link #toHandle toHandle().getPid()}. + * + * @return the native process id of the process * @throws UnsupportedOperationException if the Process implementation - * does not support this operation + * does not support this operation * @since 1.9 */ public long getPid() { - throw new UnsupportedOperationException(); + return toHandle().getPid(); + } + + /** + * Returns a {@code CompletableFuture} for the termination of the Process. + * The {@link java.util.concurrent.CompletableFuture} provides the ability + * to trigger dependent functions or actions that may be run synchronously + * or asynchronously upon process termination. + * When the process terminates the CompletableFuture is + * {@link java.util.concurrent.CompletableFuture#complete completed} regardless + * of the exit status of the process. + *

+ * Calling {@code onExit().get()} waits for the process to terminate and returns + * the Process. The future can be used to check if the process is + * {@link java.util.concurrent.CompletableFuture#isDone done} or to + * {@link java.util.concurrent.CompletableFuture#get() wait} for it to terminate. + * {@link java.util.concurrent.CompletableFuture#cancel(boolean) Cancelling} + * the CompletableFuture does not affect the Process. + *

+ * If the process is {@link #isAlive not alive} the {@link CompletableFuture} + * returned has been {@link java.util.concurrent.CompletableFuture#complete completed}. + *

+ * Processes returned from {@link ProcessBuilder#start} override the + * default implementation to provide an efficient mechanism to wait + * for process exit. + *

+ * @apiNote + * Using {@link #onExit() onExit} is an alternative to + * {@link #waitFor() waitFor} that enables both additional concurrency + * and convenient access to the result of the Process. + * Lambda expressions can be used to evaluate the result of the Process + * execution. + * If there is other processing to be done before the value is used + * then {@linkplain #onExit onExit} is a convenient mechanism to + * free the current thread and block only if and when the value is needed. + *
+ * For example, launching a process to compare two files and get a boolean if they are identical: + *

 {@code   Process p = new ProcessBuilder("cmp", "f1", "f2").start();
+     *    Future<Boolean> identical = p.onExit().thenApply(p1 -> p1.exitValue() == 0);
+     *    ...
+     *    if (identical.get()) { ... }
+     * }
+ * + * @implSpec + * This implementation executes {@link #waitFor()} in a separate thread + * repeatedly until it returns successfully. If the execution of + * {@code waitFor} is interrupted, the thread's interrupt status is preserved. + *

+ * When {@link #waitFor()} returns successfully the CompletableFuture is + * {@link java.util.concurrent.CompletableFuture#complete completed} regardless + * of the exit status of the process. + * + * This implementation may consume a lot of memory for thread stacks if a + * large number of processes are waited for concurrently. + *

+ * External implementations should override this method and provide + * a more efficient implementation. For example, to delegate to the underlying + * process, it can do the following: + *

{@code
+     *    public CompletableFuture<Process> onExit() {
+     *       return delegate.onExit().thenApply(p -> this);
+     *    }
+     * }
+ * + * @return a new {@code CompletableFuture} for the Process + * + * @since 1.9 + */ + public CompletableFuture onExit() { + return CompletableFuture.supplyAsync(this::waitForInternal); } + + /** + * Wait for the process to exit by calling {@code waitFor}. + * If the thread is interrupted, remember the interrupted state to + * be restored before returning. Use ForkJoinPool.ManagedBlocker + * so that the number of workers in case ForkJoinPool is used is + * compensated when the thread blocks in waitFor(). + * + * @return the Process + */ + private Process waitForInternal() { + boolean interrupted = false; + while (true) { + try { + ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker() { + @Override + public boolean block() throws InterruptedException { + waitFor(); + return true; + } + + @Override + public boolean isReleasable() { + return !isAlive(); + } + }); + break; + } catch (InterruptedException x) { + interrupted = true; + } + } + if (interrupted) { + Thread.currentThread().interrupt(); + } + return this; + } + + /** + * Returns a ProcessHandle for the Process. + * + * {@code Process} objects returned by {@link ProcessBuilder#start} and + * {@link Runtime#exec} implement {@code toHandle} as the equivalent of + * {@link ProcessHandle#of(long) ProcessHandle.of(pid)} including the + * check for a SecurityManager and {@code RuntimePermission("manageProcess")}. + * + * @implSpec + * This implementation throws an instance of + * {@link java.lang.UnsupportedOperationException} and performs no other action. + * Subclasses should override this method to provide a ProcessHandle for the + * process. The methods {@link #getPid}, {@link #info}, {@link #children}, + * and {@link #allChildren}, unless overridden, operate on the ProcessHandle. + * + * @return Returns a ProcessHandle for the Process + * @throws UnsupportedOperationException if the Process implementation + * does not support this operation + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + * @since 1.9 + */ + public ProcessHandle toHandle() { + throw new UnsupportedOperationException(this.getClass() + + ".toHandle() not supported"); + } + + /** + * Returns a snapshot of information about the process. + * + *

An {@link ProcessHandle.Info} instance has various accessor methods + * that return information about the process, if the process is alive and + * the information is available, otherwise {@code null} is returned. + * + * @implSpec + * This implementation returns information about the process as: + * {@link #toHandle toHandle().info()}. + * + * @return a snapshot of information about the process, always non-null + * @throws UnsupportedOperationException if the Process implementation + * does not support this operation + * @since 1.9 + */ + public ProcessHandle.Info info() { + return toHandle().info(); + } + + /** + * Returns a snapshot of the direct children of the process. + * A process that is {@link #isAlive not alive} has zero children. + *

+ * Note that processes are created and terminate asynchronously. + * There is no guarantee that a process is {@link #isAlive alive}. + * + * + * @implSpec + * This implementation returns the direct children as: + * {@link #toHandle toHandle().children()}. + * + * @return a Stream of ProcessHandles for processes that are direct children + * of the process + * @throws UnsupportedOperationException if the Process implementation + * does not support this operation + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + * @since 1.9 + */ + public Stream children() { + return toHandle().children(); + } + + /** + * Returns a snapshot of the direct and indirect children of the process. + * A process that is {@link #isAlive not alive} has zero children. + *

+ * Note that processes are created and terminate asynchronously. + * There is no guarantee that a process is {@link #isAlive alive}. + * + * + * @implSpec + * This implementation returns all children as: + * {@link #toHandle toHandle().allChildren()}. + * + * @return a Stream of ProcessHandles for processes that are direct and + * indirect children of the process + * @throws UnsupportedOperationException if the Process implementation + * does not support this operation + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + * @since 1.9 + */ + public Stream allChildren() { + return toHandle().allChildren(); + } + + } diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/lang/ProcessHandle.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/jdk/src/java.base/share/classes/java/lang/ProcessHandle.java Wed Jul 05 20:37:12 2017 +0200 @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package java.lang; + +import java.time.Duration; +import java.time.Instant; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Stream; + +/** + * ProcessHandle identifies and provides control of native processes. Each + * individual process can be monitored for liveness, list its children, + * get information about the process or destroy it. + * By comparison, {@link java.lang.Process Process} instances were started + * by the current process and additionally provide access to the process + * input, output, and error streams. + *

+ * The native process ID is an identification number that the + * operating system assigns to the process. + * The range for process id values is dependent on the operating system. + * For example, an embedded system might use a 16-bit value. + * Status information about a process is retrieved from the native system + * and may change asynchronously; processes may be created or terminate + * spontaneously. + * The time between when a process terminates and the process id + * is reused for a new process is unpredictable. + * Race conditions can exist between checking the status of a process and + * acting upon it. When using ProcessHandles avoid assumptions + * about the liveness or identity of the underlying process. + *
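A defensive sketch following that advice (the pid value is hypothetical): resolve the pid once, keep the resulting ProcessHandle, and react to termination through onExit rather than looking the pid up again later.

    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;

    public class WatchPid {
        public static void main(String[] args) {
            long pid = 12345L;                              // hypothetical pid
            Optional<ProcessHandle> handle = ProcessHandle.of(pid);
            if (handle.isPresent()) {
                ProcessHandle ph = handle.get();
                // Keep using ph; do not re-resolve the pid, it may be reused.
                CompletableFuture<ProcessHandle> exit = ph.onExit();
                exit.thenAccept(h -> System.out.println(h.getPid() + " terminated"));
            } else {
                System.out.println("process " + pid + " is not alive");
            }
        }
    }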

+ * Each ProcessHandle identifies and allows control of a process in the native + * system. ProcessHandles are returned from the factory methods {@link #current()}, + * {@link #of(long)}, + * {@link #children}, {@link #allChildren}, {@link #parent()} and + * {@link #allProcesses()}. + *

+ * The {@link Process} instances created by {@link ProcessBuilder} can be queried + * for a ProcessHandle that provides information about the Process. + * ProcessHandle references should not be freely distributed. + * + *

+ * A {@link java.util.concurrent.CompletableFuture} available from {@link #onExit} + * can be used to wait for process termination, and possibly trigger dependent + * actions. + *

+ * The factory methods limit access to ProcessHandles using the + * SecurityManager checking the {@link RuntimePermission RuntimePermission("manageProcess")}. + * The ability to control processes is also restricted by the native system, + * ProcessHandle provides no more access to, or control over, the native process + * than would be allowed by a native application. + *
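Taken together, a minimal sketch of the API described above (the printed labels are illustrative only); when a security manager is installed, current() and children() are subject to the manageProcess check:

    public class ShowChildren {
        public static void main(String[] args) {
            ProcessHandle self = ProcessHandle.current();
            System.out.println("pid: " + self.getPid());
            self.info().command().ifPresent(cmd -> System.out.println("cmd: " + cmd));
            // Children start and terminate asynchronously; this is only a snapshot.
            self.children().forEach(ph -> System.out.println("child: " + ph.getPid()));
        }
    }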

+ * @implSpec + * In the case where ProcessHandles cannot be supported then the factory + * methods must consistently throw {@link java.lang.UnsupportedOperationException}. + * The methods of this class throw {@link java.lang.UnsupportedOperationException} + * if the operating system does not allow access to query or kill a process. + * + * @see Process + * @since 1.9 + */ +public interface ProcessHandle extends Comparable { + + /** + * Returns the native process ID of the process. The native process ID is an + * identification number that the operating system assigns to the process. + * + * @return the native process ID of the process + * @throws UnsupportedOperationException if the implementation + * does not support this operation + */ + long getPid(); + + /** + * Returns an {@code Optional} for an existing native process. + * + * @param pid a native process ID + * @return an {@code Optional} of the PID for the process; + * the {@code Optional} is empty if the process does not exist + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + * @throws UnsupportedOperationException if the implementation + * does not support this operation + */ + public static Optional of(long pid) { + return ProcessHandleImpl.get(pid); + } + + /** + * Returns a ProcessHandle for the current process. The ProcessHandle cannot be + * used to destroy the current process, use {@link System#exit System.exit} instead. + * + * @return a ProcessHandle for the current process + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + * @throws UnsupportedOperationException if the implementation + * does not support this operation + */ + public static ProcessHandle current() { + return ProcessHandleImpl.current(); + } + + /** + * Returns an {@code Optional} for the parent process. + * Note that Processes in a zombie state usually don't have a parent. + * + * @return an {@code Optional} of the parent process; + * the {@code Optional} is empty if the child process does not have a parent + * or if the parent is not available, possibly due to operating system limitations + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + */ + Optional parent(); + + /** + * Returns a snapshot of the current direct children of the process. + * A process that is {@link #isAlive not alive} has zero children. + *

+ * Note that processes are created and terminate asynchronously. + * There is no guarantee that a process is {@link #isAlive alive}. + * + * + * @return a Stream of ProcessHandles for processes that are direct children + * of the process + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + */ + Stream children(); + + /** + * Returns a snapshot of the current direct and indirect children of the process. + * A process that is {@link #isAlive not alive} has zero children. + *

+ * Note that processes are created and terminate asynchronously. + * There is no guarantee that a process is {@link #isAlive alive}. + * + * + * @return a Stream of ProcessHandles for processes that are direct and + * indirect children of the process + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + */ + Stream allChildren(); + + /** + * Returns a snapshot of all processes visible to the current process. + *

+ * Note that processes are created and terminate asynchronously. There + * is no guarantee that a process in the stream is alive or that no other + * processes may have been created since the inception of the snapshot. + * + * + * @return a Stream of ProcessHandles for all processes + * @throws SecurityException if a security manager has been installed and + * it denies RuntimePermission("manageProcess") + * @throws UnsupportedOperationException if the implementation + * does not support this operation + */ + static Stream allProcesses() { + return ProcessHandleImpl.children(0); + } + + /** + * Returns a snapshot of information about the process. + * + *

An {@code Info} instance has various accessor methods that return + * information about the process, if the process is alive and the + * information is available. + * + * @return a snapshot of information about the process, always non-null + */ + Info info(); + + /** + * Information snapshot about the process. + * The attributes of a process vary by operating system and are not available + * in all implementations. Information about processes is limited + * by the operating system privileges of the process making the request. + * The return types are {@code Optional} allowing explicit tests + * and actions if the value is available. + * @since 1.9 + */ + public interface Info { + /** + * Returns the executable pathname of the process. + * + * @return an {@code Optional} of the executable pathname + * of the process + */ + public Optional command(); + + /** + * Returns an array of Strings of the arguments of the process. + * + * @return an {@code Optional} of the arguments of the process + */ + public Optional arguments(); + + /** + * Returns the start time of the process. + * + * @return an {@code Optional} of the start time of the process + */ + public Optional startInstant(); + + /** + * Returns the total cputime accumulated of the process. + * + * @return an {@code Optional} for the accumulated total cputime + */ + public Optional totalCpuDuration(); + + /** + * Return the user of the process. + * + * @return an {@code Optional} for the user of the process + */ + public Optional user(); + } + + /** + * Returns a {@code CompletableFuture} for the termination + * of the process. + * The {@link java.util.concurrent.CompletableFuture} provides the ability + * to trigger dependent functions or actions that may be run synchronously + * or asynchronously upon process termination. + * When the process terminates the CompletableFuture is + * {@link java.util.concurrent.CompletableFuture#complete completed} regardless + * of the exit status of the process. + * The {@code onExit} method can be called multiple times to invoke + * independent actions when the process exits. + *
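For instance, a sketch registering two independent completion actions on a handle obtained elsewhere (the method name and messages are hypothetical):

    class ExitActions {
        // onExit() can be called multiple times; each returned CompletableFuture
        // completes independently when the process terminates.
        static void watch(ProcessHandle ph) {
            ph.onExit().thenAccept(h -> System.out.println(h.getPid() + " exited"));
            ph.onExit().thenRun(() -> System.out.println("post-exit cleanup"));
        }
    }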

+ * Calling {@code onExit().get()} waits for the process to terminate and returns + * the ProcessHandle. The future can be used to check if the process is + * {@link java.util.concurrent.CompletableFuture#isDone done} or to + * {@link java.util.concurrent.Future#get() wait} for it to terminate. + * {@link java.util.concurrent.Future#cancel(boolean) Cancelling} + * the CompleteableFuture does not affect the Process. + *

+ * If the process is {@link #isAlive not alive} the {@link CompletableFuture} + * returned has been {@link java.util.concurrent.CompletableFuture#complete completed}. + * + * @return a new {@code CompletableFuture} for the ProcessHandle + * + * @throws IllegalStateException if the process is the current process + */ + CompletableFuture onExit(); + + /** + * Returns {@code true} if the implementation of {@link #destroy} + * normally terminates the process. + * Returns {@code false} if the implementation of {@code destroy} + * forcibly and immediately terminates the process. + * + * @return {@code true} if the implementation of {@link #destroy} + * normally terminates the process; + * otherwise, {@link #destroy} forcibly terminates the process + */ + boolean supportsNormalTermination(); + + /** + * Requests the process to be killed. + * Whether the process represented by this {@code ProcessHandle} object is + * {@link #supportsNormalTermination normally terminated} or not is + * implementation dependent. + * Forcible process destruction is defined as the immediate termination of the + * process, whereas normal termination allows the process to shut down cleanly. + * If the process is not alive, no action is taken. + * The operating system access controls may prevent the process + * from being killed. + *

+ * The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is + * {@link java.util.concurrent.CompletableFuture#complete completed} + * when the process has terminated. + *

+ * Note: The process may not terminate immediately. + * For example, {@code isAlive()} may return true for a brief period + * after {@code destroy()} is called. + * + * @return {@code true} if termination was successfully requested, + * otherwise {@code false} + * @throws IllegalStateException if the process is the current process + */ + boolean destroy(); + + /** + * Requests the process to be killed forcibly. + * The process represented by this {@code ProcessHandle} object is + * forcibly terminated. + * Forcible process destruction is defined as the immediate termination of the + * process, whereas normal termination allows the process to shut down cleanly. + * If the process is not alive, no action is taken. + * The operating system access controls may prevent the process + * from being killed. + *

+ * The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is + * {@link java.util.concurrent.CompletableFuture#complete completed} + * when the process has terminated. + *

+ * Note: The process may not terminate immediately. + * For example, {@code isAlive()} may return true for a brief period + * after {@code destroyForcibly()} is called. + * + * @return {@code true} if termination was successfully requested, + * otherwise {@code false} + * @throws IllegalStateException if the process is the current process + */ + boolean destroyForcibly(); + + /** + * Tests whether the process represented by this {@code ProcessHandle} is alive. + * Process termination is implementation and operating system specific. + * The process is considered alive as long as the PID is valid. + * + * @return {@code true} if the process represented by this + * {@code ProcessHandle} object has not yet terminated + */ + boolean isAlive(); + + /** + * Compares this ProcessHandle with the specified ProcessHandle for order. + * The order is not specified, but is consistent with {@link Object#equals}, + * which returns {@code true} if and only if two instances of ProcessHandle + * are of the same implementation and represent the same system process. + * Comparison is only supported among objects of same implementation. + * If attempt is made to mutually compare two different implementations + * of {@link ProcessHandle}s, {@link ClassCastException} is thrown. + * + * @param other the ProcessHandle to be compared + * @return a negative integer, zero, or a positive integer as this object + * is less than, equal to, or greater than the specified object. + * @throws NullPointerException if the specified object is null + * @throws ClassCastException if the specified object is not of same class + * as this object + */ + @Override + int compareTo(ProcessHandle other); + +} diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/lang/ProcessHandleImpl.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/jdk/src/java.base/share/classes/java/lang/ProcessHandleImpl.java Wed Jul 05 20:37:12 2017 +0200 @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package java.lang; + +import java.security.PrivilegedAction; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Executor; +import java.util.concurrent.Executors; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import sun.misc.InnocuousThread; + +import static java.security.AccessController.doPrivileged; + +/** + * ProcessHandleImpl is the implementation of ProcessHandle. + * + * @see Process + * @since 1.9 + */ +final class ProcessHandleImpl implements ProcessHandle { + + /** + * The thread pool of "process reaper" daemon threads. + */ + private static final Executor processReaperExecutor = + doPrivileged((PrivilegedAction) () -> { + + ThreadGroup tg = Thread.currentThread().getThreadGroup(); + while (tg.getParent() != null) tg = tg.getParent(); + ThreadGroup systemThreadGroup = tg; + + ThreadFactory threadFactory = grimReaper -> { + // Our thread stack requirement is quite modest. + Thread t = new Thread(systemThreadGroup, grimReaper, + "process reaper", 32768); + t.setDaemon(true); + // A small attempt (probably futile) to avoid priority inversion + t.setPriority(Thread.MAX_PRIORITY); + return t; + }; + + return Executors.newCachedThreadPool(threadFactory); + }); + + private static class ExitCompletion extends CompletableFuture { + final boolean isReaping; + + ExitCompletion(boolean isReaping) { + this.isReaping = isReaping; + } + } + + private static final ConcurrentMap + completions = new ConcurrentHashMap<>(); + + /** + * Returns a CompletableFuture that completes with process exit status when + * the process completes. + * + * @param shouldReap true if the exit value should be reaped + */ + static CompletableFuture completion(long pid, boolean shouldReap) { + // check canonicalizing cache 1st + ExitCompletion completion = completions.get(pid); + // re-try until we get a completion that shouldReap => isReaping + while (completion == null || (shouldReap && !completion.isReaping)) { + ExitCompletion newCompletion = new ExitCompletion(shouldReap); + if (completion == null) { + completion = completions.putIfAbsent(pid, newCompletion); + } else { + completion = completions.replace(pid, completion, newCompletion) + ? null : completions.get(pid); + } + if (completion == null) { + // newCompletion has just been installed successfully + completion = newCompletion; + // spawn a thread to wait for and deliver the exit value + processReaperExecutor.execute(() -> { + int exitValue = waitForProcessExit0(pid, shouldReap); + newCompletion.complete(exitValue); + // remove from cache afterwards + completions.remove(pid, newCompletion); + }); + } + } + return completion; + } + + @Override + public CompletableFuture onExit() { + if (this.equals(current)) { + throw new IllegalStateException("onExit for current process not allowed"); + } + + return ProcessHandleImpl.completion(getPid(), false) + .handleAsync((exitStatus, unusedThrowable) -> this); + } + + /** + * Wait for the process to exit, return the value. 
+ * Conditionally reap the value if requested + * @param pid the processId + * @param reapvalue if true, the value is retrieved, + * else return the value and leave the process waitable + * + * @return the value or -1 if an error occurs + */ + private static native int waitForProcessExit0(long pid, boolean reapvalue); + + /** + * Cache the ProcessHandle of this process. + */ + private static final ProcessHandleImpl current = + new ProcessHandleImpl(getCurrentPid0()); + + /** + * The pid of this ProcessHandle. + */ + private final long pid; + + /** + * Private constructor. Instances are created by the {@code get(long)} factory. + * @param pid the pid for this instance + */ + private ProcessHandleImpl(long pid) { + this.pid = pid; + } + + /** + * Returns a ProcessHandle for an existing native process. + * + * @param pid the native process identifier + * @return The ProcessHandle for the pid if the process is alive; + * or {@code null} if the process ID does not exist in the native system. + * @throws SecurityException if RuntimePermission("manageProcess") is not granted + */ + static Optional get(long pid) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new RuntimePermission("manageProcess")); + } + return Optional.ofNullable(isAlive0(pid) ? new ProcessHandleImpl(pid) : null); + } + + /** + * Returns a ProcessHandle corresponding known to exist pid. + * Called from ProcessImpl, it does not perform a security check or check if the process is alive. + * @param pid of the known to exist process + * @return a ProcessHandle corresponding to an existing Process instance + */ + static ProcessHandle getUnchecked(long pid) { + return new ProcessHandleImpl(pid); + } + + /** + * Returns the native process ID. + * A {@code long} is used to be able to fit the system specific binary values + * for the process. + * + * @return the native process ID + */ + @Override + public long getPid() { + return pid; + } + + /** + * Returns the ProcessHandle for the current native process. + * + * @return The ProcessHandle for the OS process. + * @throws SecurityException if RuntimePermission("manageProcess") is not granted + */ + public static ProcessHandleImpl current() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new RuntimePermission("manageProcess")); + } + return current; + } + + /** + * Return the pid of the current process. + * + * @return the pid of the current process + */ + private static native long getCurrentPid0(); + + /** + * Returns a ProcessHandle for the parent process. + * + * @return a ProcessHandle of the parent process; {@code null} is returned + * if the child process does not have a parent + * @throws SecurityException if permission is not granted by the + * security policy + */ + static Optional parent(long pid) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new RuntimePermission("manageProcess")); + } + long ppid = parent0(pid); + if (ppid <= 0) { + return Optional.empty(); + } + return get(ppid); + } + + /** + * Returns the parent of the native pid argument. + * + * @return the parent of the native pid; if any, otherwise -1 + */ + private static native long parent0(long pid); + + /** + * Returns the number of pids filled in to the array. 
+ * @param pid if {@code pid} equals zero, then all known processes are returned; + * otherwise only direct child process pids are returned + * @param pids an allocated long array to receive the pids + * @param ppids an allocated long array to receive the parent pids; may be null + * @return if greater than or equals to zero is the number of pids in the array; + * if greater than the length of the arrays, the arrays are too small + */ + private static native int getProcessPids0(long pid, long[] pids, long[] ppids); + + /** + * Destroy the process for this ProcessHandle. + * @param pid the processs ID to destroy + * @param force {@code true} if the process should be terminated forcibly; + * else {@code false} for a normal termination + */ + static void destroyProcess(long pid, boolean force) { + destroy0(pid, force); + } + + private static native boolean destroy0(long pid, boolean forcibly); + + @Override + public boolean destroy() { + if (this.equals(current)) { + throw new IllegalStateException("destroy of current process not allowed"); + } + return destroy0(getPid(), false); + } + + @Override + public boolean destroyForcibly() { + if (this.equals(current)) { + throw new IllegalStateException("destroy of current process not allowed"); + } + return destroy0(getPid(), true); + } + + + @Override + public boolean supportsNormalTermination() { + return ProcessImpl.SUPPORTS_NORMAL_TERMINATION; + } + + /** + * Tests whether the process represented by this {@code ProcessHandle} is alive. + * + * @return {@code true} if the process represented by this + * {@code ProcessHandle} object has not yet terminated. + * @since 1.9 + */ + @Override + public boolean isAlive() { + return isAlive0(pid); + } + + /** + * Returns true or false depending on whether the pid is alive. + * This must not reap the exitValue like the isAlive method above. + * + * @param pid the pid to check + * @return true or false + */ + private static native boolean isAlive0(long pid); + + @Override + public Optional parent() { + return parent(pid); + } + + @Override + public Stream children() { + return children(pid); + } + + /** + * Returns a Stream of the children of a process or all processes. 
+ * + * @param pid the pid of the process for which to find the children; + * 0 for all processes + * @return a stream of ProcessHandles + */ + static Stream children(long pid) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new RuntimePermission("manageProcess")); + } + int size = 100; + long[] childpids = null; + while (childpids == null || size > childpids.length) { + childpids = new long[size]; + size = getProcessPids0(pid, childpids, null); + } + return Arrays.stream(childpids, 0, size).mapToObj((id) -> new ProcessHandleImpl(id)); + } + + @Override + public Stream allChildren() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new RuntimePermission("manageProcess")); + } + int size = 100; + long[] pids = null; + long[] ppids = null; + while (pids == null || size > pids.length) { + pids = new long[size]; + ppids = new long[size]; + size = getProcessPids0(0, pids, ppids); + } + + int next = 0; // index of next process to check + int count = -1; // count of subprocesses scanned + long ppid = pid; // start looking for this parent + do { + // Scan from next to size looking for ppid + // if found, exchange it to index next + for (int i = next; i < size; i++) { + if (ppids[i] == ppid) { + swap(pids, i, next); + swap(ppids, i, next); + next++; + } + } + ppid = pids[++count]; // pick up the next pid to scan for + } while (count < next); + + return Arrays.stream(pids, 0, count).mapToObj((id) -> new ProcessHandleImpl(id)); + } + + // Swap two elements in an array + private static void swap(long[] array, int x, int y) { + long v = array[x]; + array[x] = array[y]; + array[y] = v; + } + + @Override + public ProcessHandle.Info info() { + return ProcessHandleImpl.Info.info(pid); + } + + @Override + public int compareTo(ProcessHandle other) { + return Long.compare(pid, ((ProcessHandleImpl) other).pid); + } + + @Override + public String toString() { + return Long.toString(pid); + } + + @Override + public int hashCode() { + return Long.hashCode(pid); + } + + @Override + public boolean equals(Object obj) { + return (obj instanceof ProcessHandleImpl) && + (pid == ((ProcessHandleImpl) obj).pid); + } + + /** + * Implementation of ProcessHandle.Info. + * Information snapshot about a process. + * The attributes of a process vary by operating system and not available + * in all implementations. Additionally, information about other processes + * is limited by the operating system privileges of the process making the request. + * If a value is not available, either a {@code null} or {@code -1} is stored. + * The accessor methods return {@code null} if the value is not available. + */ + static class Info implements ProcessHandle.Info { + static { + initIDs(); + } + + /** + * Initialization of JNI fieldIDs. + */ + private static native void initIDs(); + + /** + * Fill in this Info instance with information about the native process. + * If values are not available the native code does not modify the field. + * @param pid of the native process + */ + private native void info0(long pid); + + String command; + String[] arguments; + long startTime; + long totalTime; + String user; + + Info() { + command = null; + arguments = null; + startTime = -1L; + totalTime = -1L; + user = null; + } + + /** + * Returns the Info object with the fields from the process. + * Whatever fields are provided by native are returned. 
+ * + * @param pid the native process identifier + * @return ProcessHandle.Info non-null; individual fields may be null + * or -1 if not available. + */ + public static ProcessHandle.Info info(long pid) { + Info info = new Info(); + info.info0(pid); + return info; + } + + @Override + public Optional command() { + return Optional.ofNullable(command); + } + + @Override + public Optional arguments() { + return Optional.ofNullable(arguments); + } + + @Override + public Optional startInstant() { + return (startTime > 0) + ? Optional.of(Instant.ofEpochMilli(startTime)) + : Optional.empty(); + } + + @Override + public Optional totalCpuDuration() { + return (totalTime != -1) + ? Optional.of(Duration.ofNanos(totalTime)) + : Optional.empty(); + } + + @Override + public Optional user() { + return Optional.ofNullable(user); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(60); + sb.append('['); + if (user != null) { + sb.append("user: "); + sb.append(user()); + } + if (command != null) { + if (sb.length() != 0) sb.append(", "); + sb.append("cmd: "); + sb.append(command); + } + if (arguments != null && arguments.length > 0) { + if (sb.length() != 0) sb.append(", "); + sb.append("args: "); + sb.append(Arrays.toString(arguments)); + } + if (startTime != -1) { + if (sb.length() != 0) sb.append(", "); + sb.append("startTime: "); + sb.append(startInstant()); + } + if (totalTime != -1) { + if (sb.length() != 0) sb.append(", "); + sb.append("totalTime: "); + sb.append(totalCpuDuration().toString()); + } + sb.append(']'); + return sb.toString(); + } + } +} diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/lang/RuntimePermission.java --- a/jdk/src/java.base/share/classes/java/lang/RuntimePermission.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/lang/RuntimePermission.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -333,6 +333,12 @@ * "../../../technotes/guides/plugin/developer_guide/rsa_how.html#use"> * usePolicy Permission. * + * + * manageProcess + * Native process termination and information about processes + * {@link ProcessHandle}. + * Allows code to identify and terminate processes that it did not create. 
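For orientation, here is a minimal sketch, not part of this changeset, showing how the ProcessHandle API introduced above could be exercised under the new RuntimePermission("manageProcess"). It assumes the method names as they appear in this patch (getPid(), current(), parent(), children(), info()); the demo class name is invented, and later JDK builds may rename some of these methods.

import java.time.Duration;

// Illustrative sketch only: walk the process tree using the ProcessHandle API
// added in this changeset. With a security manager installed, these calls
// require RuntimePermission("manageProcess").
public class ProcessTreeDemo {
    public static void main(String[] args) {
        ProcessHandle self = ProcessHandle.current();
        System.out.println("pid: " + self.getPid());

        // Parent process, if the operating system reports one.
        self.parent().ifPresent(p -> System.out.println("parent: " + p.getPid()));

        // Direct children plus an information snapshot for each.
        self.children().forEach(child -> {
            ProcessHandle.Info info = child.info();
            System.out.println(child.getPid()
                    + " cmd=" + info.command().orElse("<unknown>")
                    + " cpu=" + info.totalCpuDuration().orElse(Duration.ZERO));
        });
    }
}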
+ * * * * localeServiceProvider diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/lang/String.java --- a/jdk/src/java.base/share/classes/java/lang/String.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/lang/String.java Wed Jul 05 20:37:12 2017 +0200 @@ -2247,8 +2247,29 @@ * @since 1.5 */ public String replace(CharSequence target, CharSequence replacement) { - return Pattern.compile(target.toString(), Pattern.LITERAL).matcher( - this).replaceAll(Matcher.quoteReplacement(replacement.toString())); + String starget = target.toString(); + String srepl = replacement.toString(); + int j = indexOf(starget); + if (j < 0) { + return this; + } + int targLen = starget.length(); + int targLen1 = Math.max(targLen, 1); + final char[] value = this.value; + final char[] replValue = srepl.value; + int newLenHint = value.length - targLen + replValue.length; + if (newLenHint < 0) { + throw new OutOfMemoryError(); + } + StringBuilder sb = new StringBuilder(newLenHint); + int i = 0; + do { + sb.append(value, i, j - i) + .append(replValue); + i = j + targLen; + } while (j < value.length && (j = indexOf(starget, j + targLen1)) > 0); + + return sb.append(value, i, value.length - i).toString(); } /** diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/net/AbstractPlainSocketImpl.java --- a/jdk/src/java.base/share/classes/java/net/AbstractPlainSocketImpl.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/net/AbstractPlainSocketImpl.java Wed Jul 05 20:37:12 2017 +0200 @@ -312,11 +312,16 @@ ret = socketGetOption(opt, null); return ret; case IP_TOS: - ret = socketGetOption(opt, null); - if (ret == -1) { // ipv6 tos - return trafficClass; - } else { - return ret; + try { + ret = socketGetOption(opt, null); + if (ret == -1) { // ipv6 tos + return trafficClass; + } else { + return ret; + } + } catch (SocketException se) { + // TODO - should make better effort to read TOS or TCLASS + return trafficClass; // ipv6 tos } case SO_KEEPALIVE: ret = socketGetOption(opt, null); diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/net/DatagramSocket.java --- a/jdk/src/java.base/share/classes/java/net/DatagramSocket.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/net/DatagramSocket.java Wed Jul 05 20:37:12 2017 +0200 @@ -1184,7 +1184,14 @@ if (isClosed()) throw new SocketException("Socket is closed"); - getImpl().setOption(SocketOptions.IP_TOS, tc); + try { + getImpl().setOption(SocketOptions.IP_TOS, tc); + } catch (SocketException se) { + // not supported if socket already connected + // Solaris returns error in such cases + if(!isConnected()) + throw se; + } } /** diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/net/Socket.java --- a/jdk/src/java.base/share/classes/java/net/Socket.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/net/Socket.java Wed Jul 05 20:37:12 2017 +0200 @@ -1380,7 +1380,14 @@ if (isClosed()) throw new SocketException("Socket is closed"); - getImpl().setOption(SocketOptions.IP_TOS, tc); + try { + getImpl().setOption(SocketOptions.IP_TOS, tc); + } catch (SocketException se) { + // not supported if socket already connected + // Solaris returns error in such cases + if(!isConnected()) + throw se; + } } /** diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/time/zone/ZoneOffsetTransition.java --- a/jdk/src/java.base/share/classes/java/time/zone/ZoneOffsetTransition.java Wed Jul 05 
20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/time/zone/ZoneOffsetTransition.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -156,6 +156,7 @@ * @param offsetAfter the offset at and after the transition, not null */ ZoneOffsetTransition(LocalDateTime transition, ZoneOffset offsetBefore, ZoneOffset offsetAfter) { + assert transition.getNano() == 0; this.epochSecond = transition.toEpochSecond(offsetBefore); this.transition = transition; this.offsetBefore = offsetBefore; @@ -250,7 +251,7 @@ * @return the transition instant, not null */ public Instant getInstant() { - return transition.toInstant(offsetBefore); + return Instant.ofEpochSecond(epochSecond); } /** @@ -403,13 +404,7 @@ */ @Override public int compareTo(ZoneOffsetTransition transition) { - if (epochSecond < transition.epochSecond) { - return -1; - } else if (epochSecond > transition.epochSecond) { - return 1; - } else { - return this.getInstant().compareTo(transition.getInstant()); - } + return Long.compare(epochSecond, transition.epochSecond); } //----------------------------------------------------------------------- @@ -429,7 +424,6 @@ if (other instanceof ZoneOffsetTransition) { ZoneOffsetTransition d = (ZoneOffsetTransition) other; return epochSecond == d.epochSecond && - transition.equals(d.transition) && offsetBefore.equals(d.offsetBefore) && offsetAfter.equals(d.offsetAfter); } return false; diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/time/zone/ZoneOffsetTransitionRule.java --- a/jdk/src/java.base/share/classes/java/time/zone/ZoneOffsetTransitionRule.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/time/zone/ZoneOffsetTransitionRule.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -167,6 +167,7 @@ * @return the rule, not null * @throws IllegalArgumentException if the day of month indicator is invalid * @throws IllegalArgumentException if the end of day flag is true when the time is not midnight + * @throws IllegalArgumentException if {@code time.getNano()} returns non-zero value */ public static ZoneOffsetTransitionRule of( Month month, @@ -190,6 +191,9 @@ if (timeEndOfDay && time.equals(LocalTime.MIDNIGHT) == false) { throw new IllegalArgumentException("Time must be midnight when end of day flag is true"); } + if (time.getNano() != 0) { + throw new IllegalArgumentException("Time's nano-of-second must be zero"); + } return new ZoneOffsetTransitionRule(month, dayOfMonthIndicator, dayOfWeek, time, timeEndOfDay, timeDefnition, standardOffset, offsetBefore, offsetAfter); } @@ -220,6 +224,7 @@ ZoneOffset standardOffset, ZoneOffset offsetBefore, ZoneOffset offsetAfter) { + assert time.getNano() == 0; this.month = month; this.dom = (byte) dayOfMonthIndicator; this.dow = dayOfWeek; diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/util/Enumeration.java --- a/jdk/src/java.base/share/classes/java/util/Enumeration.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/util/Enumeration.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,11 +40,14 @@ * vector, the keys of a hashtable, and the values in a hashtable. * Enumerations are also used to specify the input streams to a * SequenceInputStream. - *
- * NOTE: The functionality of this interface is duplicated by the Iterator - * interface. In addition, Iterator adds an optional remove operation, and - * has shorter method names. New implementations should consider using - * Iterator in preference to Enumeration. + * + * @apiNote + * The functionality of this interface is duplicated by the {@link Iterator} + * interface. In addition, {@code Iterator} adds an optional remove operation, + * and has shorter method names. New implementations should consider using + * {@code Iterator} in preference to {@code Enumeration}. It is possible to + * adapt an {@code Enumeration} to an {@code Iterator} by using the + * {@link #asIterator} method. * * @see java.util.Iterator * @see java.io.SequenceInputStream @@ -76,4 +79,49 @@ * @exception NoSuchElementException if no more elements exist. */ E nextElement(); + + /** + * Returns an {@link Iterator} that traverses the remaining elements + * covered by this enumeration. Traversal is undefined if any methods + * are called on this enumeration after the call to {@code asIterator}. + * + * @apiNote + * This method is intended to help adapt code that produces + * {@code Enumeration} instances to code that consumes {@code Iterator} + * instances. For example, the {@link java.util.jar.JarFile#entries + * JarFile.entries()} method returns an {@code Enumeration}. + * This can be turned into an {@code Iterator}, and then the + * {@code forEachRemaining()} method can be used: + * + *
+     * <pre>{@code
+     *     JarFile jarFile = ... ;
+     *     jarFile.entries().asIterator().forEachRemaining(entry -> { ... });
+     * }</pre>
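As a self-contained counterpart to the JarFile example above, the sketch below, which is illustrative and not part of the patch, adapts a legacy Enumeration obtained from Vector.elements() to the new asIterator() default method; the class name AsIteratorDemo is invented for the example.

import java.util.Enumeration;
import java.util.Iterator;
import java.util.Vector;

// Illustrative sketch only: adapt a legacy Enumeration source to the
// asIterator() default method added in this changeset.
public class AsIteratorDemo {
    public static void main(String[] args) {
        Vector<String> legacy = new Vector<>();
        legacy.add("alpha");
        legacy.add("beta");

        // Vector.elements() still hands out an Enumeration; asIterator()
        // wraps it so Iterator-based code, including forEachRemaining(),
        // can consume the remaining elements.
        Enumeration<String> en = legacy.elements();
        Iterator<String> it = en.asIterator();
        it.forEachRemaining(System.out::println);
    }
}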
+ * + * (Note that there is also a {@link java.util.jar.JarFile#stream + * JarFile.stream()} method that returns a {@code Stream} of entries, + * which may be more convenient in some cases.) + * + * @implSpec + * The default implementation returns an {@code Iterator} whose + * {@link Iterator#hasNext hasNext} method calls this Enumeration's + * {@code hasMoreElements} method, whose {@link Iterator#next next} + * method calls this Enumeration's {@code nextElement} method, and + * whose {@link Iterator#remove remove} method throws + * {@code UnsupportedOperationException}. + * + * @return an Iterator representing the remaining elements of this Enumeration + * + * @since 1.9 + */ + default Iterator asIterator() { + return new Iterator<>() { + @Override public boolean hasNext() { + return hasMoreElements(); + } + @Override public E next() { + return nextElement(); + } + }; + } } diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/java/util/Iterator.java --- a/jdk/src/java.base/share/classes/java/util/Iterator.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/java/util/Iterator.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,10 @@ * * Java Collections Framework. * + * @apiNote + * An {@link Enumeration} can be converted into an {@code Iterator} by + * using the {@link Enumeration#asIterator} method. + * * @param the type of elements returned by this iterator * * @author Josh Bloch diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/javax/net/ssl/ExtendedSSLSession.java --- a/jdk/src/java.base/share/classes/javax/net/ssl/ExtendedSSLSession.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/javax/net/ssl/ExtendedSSLSession.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ import java.util.List; /** - * Extends the SSLSession interface to support additional + * Extends the {@code SSLSession} interface to support additional * session attributes. * * @since 1.7 @@ -39,8 +39,8 @@ * is willing to use. *
* Note: this method is used to indicate to the peer which signature - * algorithms may be used for digital signatures in TLS 1.2. It is - * not meaningful for TLS versions prior to 1.2. + * algorithms may be used for digital signatures in TLS/DTLS 1.2. It is + * not meaningful for TLS/DTLS versions prior to 1.2. *
* The signature algorithm name must be a standard Java Security * name (such as "SHA1withRSA", "SHA256withECDSA", and so on). @@ -52,7 +52,7 @@ * Note: the local supported signature algorithms should conform to * the algorithm constraints specified by * {@link SSLParameters#getAlgorithmConstraints getAlgorithmConstraints()} - * method in SSLParameters. + * method in {@code SSLParameters}. * * @return An array of supported signature algorithms, in descending * order of preference. The return value is an empty array if @@ -67,8 +67,8 @@ * able to use. *
* Note: this method is used to indicate to the local side which signature - * algorithms may be used for digital signatures in TLS 1.2. It is - * not meaningful for TLS versions prior to 1.2. + * algorithms may be used for digital signatures in TLS/DTLS 1.2. It is + * not meaningful for TLS/DTLS versions prior to 1.2. *
* The signature algorithm name must be a standard Java Security * name (such as "SHA1withRSA", "SHA256withECDSA", and so on). diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/javax/net/ssl/SNIServerName.java --- a/jdk/src/java.base/share/classes/javax/net/ssl/SNIServerName.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/javax/net/ssl/SNIServerName.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ * Instances of this class represent a server name in a Server Name * Indication (SNI) extension. *
- * The SNI extension is a feature that extends the SSL/TLS protocols to + * The SNI extension is a feature that extends the SSL/TLS/DTLS protocols to * indicate what server name the client is attempting to connect to during * handshaking. See section 3, "Server Name Indication", of TLS Extensions (RFC 6066). diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/javax/net/ssl/SSLContext.java --- a/jdk/src/java.base/share/classes/javax/net/ssl/SSLContext.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/javax/net/ssl/SSLContext.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,12 +32,12 @@ /** * Instances of this class represent a secure socket protocol * implementation which acts as a factory for secure socket - * factories or SSLEngines. This class is initialized + * factories or {@code SSLEngine}s. This class is initialized * with an optional set of key and trust managers and source of * secure random bytes. * *
Every implementation of the Java platform is required to support the - * following standard SSLContext protocol: + * following standard {@code SSLContext} protocol: *
 * <ul>
 * <li>TLSv1</li>
 * </ul>
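The SSLContext hunks that follow mostly reword the javadoc with {@code ...} markup and document UnsupportedOperationException on the factory methods. As a hedged illustration, not part of the patch, the sketch below obtains a context for the mandated TLSv1 protocol name and creates an SSLEngine with advisory peer information; the class name, host, and port are placeholders.

import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

// Illustrative sketch only; the host and port are placeholders.
public class SslContextDemo {
    public static void main(String[] args)
            throws NoSuchAlgorithmException, KeyManagementException {
        // Every Java platform implementation must support the "TLSv1" protocol name.
        SSLContext ctx = SSLContext.getInstance("TLSv1");

        // Nulls fall back to the default key managers, trust managers,
        // and SecureRandom implementation.
        ctx.init(null, null, null);

        // Advisory peer information is only a hint for session reuse; a provider
        // that does not implement engine creation throws UnsupportedOperationException.
        SSLEngine engine = ctx.createSSLEngine("example.com", 443);
        engine.setUseClientMode(true);

        System.out.println(ctx.getProtocol() + " from provider "
                + ctx.getProvider().getName());
    }
}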
@@ -79,7 +79,7 @@ *
If a default context was set using the {@link #setDefault * SSLContext.setDefault()} method, it is returned. Otherwise, the first * call of this method triggers the call - * SSLContext.getInstance("Default"). + * {@code SSLContext.getInstance("Default")}. * If successful, that object is made the default SSL context and returned. * *
The default context is immediately @@ -106,8 +106,8 @@ * @param context the SSLContext * @throws NullPointerException if context is null * @throws SecurityException if a security manager exists and its - * checkPermission method does not allow - * SSLPermission("setDefaultSSLContext") + * {@code checkPermission} method does not allow + * {@code SSLPermission("setDefaultSSLContext")} * @since 1.6 */ public static synchronized void setDefault(SSLContext context) { @@ -122,7 +122,7 @@ } /** - * Returns a SSLContext object that implements the + * Returns a {@code SSLContext} object that implements the * specified secure socket protocol. * *
This method traverses the list of registered security Providers, @@ -141,7 +141,7 @@ * Documentation * for information about standard protocol names. * - * @return the new SSLContext object. + * @return the new {@code SSLContext} object. * * @exception NoSuchAlgorithmException if no Provider supports a * SSLContextSpi implementation for the @@ -159,7 +159,7 @@ } /** - * Returns a SSLContext object that implements the + * Returns a {@code SSLContext} object that implements the * specified secure socket protocol. * *
A new SSLContext object encapsulating the @@ -179,7 +179,7 @@ * * @param provider the name of the provider. * - * @return the new SSLContext object. + * @return the new {@code SSLContext} object. * * @throws NoSuchAlgorithmException if a SSLContextSpi * implementation for the specified protocol is not @@ -202,7 +202,7 @@ } /** - * Returns a SSLContext object that implements the + * Returns a {@code SSLContext} object that implements the * specified secure socket protocol. * *
A new SSLContext object encapsulating the @@ -219,7 +219,7 @@ * * @param provider an instance of the provider. * - * @return the new SSLContext object. + * @return the new {@code SSLContext} object. * * @throws NoSuchAlgorithmException if a SSLContextSpi * implementation for the specified protocol is not available @@ -239,22 +239,22 @@ } /** - * Returns the protocol name of this SSLContext object. + * Returns the protocol name of this {@code SSLContext} object. * *
This is the same name that was specified in one of the - * getInstance calls that created this - * SSLContext object. + * {@code getInstance} calls that created this + * {@code SSLContext} object. * - * @return the protocol name of this SSLContext object. + * @return the protocol name of this {@code SSLContext} object. */ public final String getProtocol() { return this.protocol; } /** - * Returns the provider of this SSLContext object. + * Returns the provider of this {@code SSLContext} object. * - * @return the provider of this SSLContext object + * @return the provider of this {@code SSLContext} object */ public final Provider getProvider() { return this.provider; @@ -283,31 +283,35 @@ } /** - * Returns a SocketFactory object for this + * Returns a {@code SocketFactory} object for this * context. * - * @return the SocketFactory object + * @return the {@code SocketFactory} object + * @throws UnsupportedOperationException if the underlying provider + * does not implement the operation. * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the init() has not been called + * initialization and the {@code init()} has not been called */ public final SSLSocketFactory getSocketFactory() { return contextSpi.engineGetSocketFactory(); } /** - * Returns a ServerSocketFactory object for + * Returns a {@code ServerSocketFactory} object for * this context. * - * @return the ServerSocketFactory object + * @return the {@code ServerSocketFactory} object + * @throws UnsupportedOperationException if the underlying provider + * does not implement the operation. * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the init() has not been called + * initialization and the {@code init()} has not been called */ public final SSLServerSocketFactory getServerSocketFactory() { return contextSpi.engineGetServerSocketFactory(); } /** - * Creates a new SSLEngine using this context. + * Creates a new {@code SSLEngine} using this context. *
* Applications using this factory method are providing no hints * for an internal session reuse strategy. If hints are desired, @@ -317,11 +321,11 @@ * Some cipher suites (such as Kerberos) require remote hostname * information, in which case this factory method should not be used. * - * @return the SSLEngine object + * @return the {@code SSLEngine} object * @throws UnsupportedOperationException if the underlying provider * does not implement the operation. * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the init() has not been called + * initialization and the {@code init()} has not been called * @since 1.5 */ public final SSLEngine createSSLEngine() { @@ -338,7 +342,7 @@ } /** - * Creates a new SSLEngine using this context using + * Creates a new {@code SSLEngine} using this context using * advisory peer information. *
* Applications using this factory method are providing hints @@ -349,11 +353,11 @@ * * @param peerHost the non-authoritative name of the host * @param peerPort the non-authoritative port - * @return the new SSLEngine object + * @return the new {@code SSLEngine} object * @throws UnsupportedOperationException if the underlying provider * does not implement the operation. * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the init() has not been called + * initialization and the {@code init()} has not been called * @since 1.5 */ public final SSLEngine createSSLEngine(String peerHost, int peerPort) { diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/javax/net/ssl/SSLContextSpi.java --- a/jdk/src/java.base/share/classes/javax/net/ssl/SSLContextSpi.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/javax/net/ssl/SSLContextSpi.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ /** * This class defines the Service Provider Interface (SPI) - * for the SSLContext class. + * for the {@code SSLContext} class. * *
All the abstract methods in this class must be implemented by each * cryptographic service provider who wishes to supply the implementation @@ -52,31 +52,35 @@ SecureRandom sr) throws KeyManagementException; /** - * Returns a SocketFactory object for this + * Returns a {@code SocketFactory} object for this * context. * - * @return the SocketFactory object + * @return the {@code SocketFactory} object + * @throws UnsupportedOperationException if the underlying provider + * does not implement the operation. * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the engineInit() + * initialization and the {@code engineInit()} * has not been called * @see javax.net.ssl.SSLContext#getSocketFactory() */ protected abstract SSLSocketFactory engineGetSocketFactory(); /** - * Returns a ServerSocketFactory object for + * Returns a {@code ServerSocketFactory} object for * this context. * - * @return the ServerSocketFactory object + * @return the {@code ServerSocketFactory} object + * @throws UnsupportedOperationException if the underlying provider + * does not implement the operation. * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the engineInit() + * initialization and the {@code engineInit()} * has not been called * @see javax.net.ssl.SSLContext#getServerSocketFactory() */ protected abstract SSLServerSocketFactory engineGetServerSocketFactory(); /** - * Creates a new SSLEngine using this context. + * Creates a new {@code SSLEngine} using this context. *
* Applications using this factory method are providing no hints * for an internal session reuse strategy. If hints are desired, @@ -86,9 +90,9 @@ * Some cipher suites (such as Kerberos) require remote hostname * information, in which case this factory method should not be used. * - * @return the SSLEngine Object + * @return the {@code SSLEngine} Object * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the engineInit() + * initialization and the {@code engineInit()} * has not been called * * @see SSLContext#createSSLEngine() @@ -98,7 +102,7 @@ protected abstract SSLEngine engineCreateSSLEngine(); /** - * Creates a SSLEngine using this context. + * Creates a {@code SSLEngine} using this context. *
* Applications using this factory method are providing hints * for an internal session reuse strategy. @@ -108,9 +112,9 @@ * * @param host the non-authoritative name of the host * @param port the non-authoritative port - * @return the SSLEngine Object + * @return the {@code SSLEngine} Object * @throws IllegalStateException if the SSLContextImpl requires - * initialization and the engineInit() + * initialization and the {@code engineInit()} * has not been called * * @see SSLContext#createSSLEngine(String, int) @@ -120,19 +124,19 @@ protected abstract SSLEngine engineCreateSSLEngine(String host, int port); /** - * Returns a server SSLSessionContext object for + * Returns a server {@code SSLSessionContext} object for * this context. * - * @return the SSLSessionContext object + * @return the {@code SSLSessionContext} object * @see javax.net.ssl.SSLContext#getServerSessionContext() */ protected abstract SSLSessionContext engineGetServerSessionContext(); /** - * Returns a client SSLSessionContext object for + * Returns a client {@code SSLSessionContext} object for * this context. * - * @return the SSLSessionContext object + * @return the {@code SSLSessionContext} object * @see javax.net.ssl.SSLContext#getClientSessionContext() */ protected abstract SSLSessionContext engineGetClientSessionContext(); diff -r 1c0a1cee6054 -r 5b500c93ce48 jdk/src/java.base/share/classes/javax/net/ssl/SSLEngine.java --- a/jdk/src/java.base/share/classes/javax/net/ssl/SSLEngine.java Wed Jul 05 20:36:16 2017 +0200 +++ b/jdk/src/java.base/share/classes/javax/net/ssl/SSLEngine.java Wed Jul 05 20:37:12 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,15 +37,15 @@ *
* The secure communications modes include: